| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86 |
| ; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 |
| |
| define i64 @test_add_i64_i16_const(i16 %a) nounwind { |
| ; X86-LABEL: test_add_i64_i16_const: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: addl $42, %eax |
| ; X86-NEXT: xorl %edx, %edx |
| ; X86-NEXT: retl |
| ; |
| ; X64-LABEL: test_add_i64_i16_const: |
| ; X64: # %bb.0: |
| ; X64-NEXT: movzwl %di, %eax |
| ; X64-NEXT: addq $42, %rax |
| ; X64-NEXT: retq |
| ; zext of an i16 leaves the upper 48 bits of %zext_a known zero, so adding a |
| ; small constant cannot carry into the high half: on 32-bit the i64 add |
| ; lowers to a 32-bit add plus a zeroed high register (per the checks above). |
| %zext_a = zext i16 %a to i64 |
| %sum = add nuw nsw i64 %zext_a, 42 |
| ret i64 %sum |
| } |
| |
| ; TODO: The upper 48 bits are all zeros, so we can safely truncate to a 32-bit addition |
| define i64 @test_add_i64_i16_zext(i16 %a, i16 %b) nounwind { |
| ; X86-LABEL: test_add_i64_i16_zext: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: addl %ecx, %eax |
| ; X86-NEXT: xorl %edx, %edx |
| ; X86-NEXT: retl |
| ; |
| ; X64-LABEL: test_add_i64_i16_zext: |
| ; X64: # %bb.0: |
| ; X64-NEXT: movzwl %di, %ecx |
| ; X64-NEXT: movzwl %si, %eax |
| ; X64-NEXT: addq %rcx, %rax |
| ; X64-NEXT: retq |
| ; Both operands are zero-extended i16, so the sum fits in 32 bits; the |
| ; 32-bit target already narrows the add, while the 64-bit target still |
| ; emits a 64-bit addq (the missed optimization the TODO above refers to). |
| %zext_a = zext i16 %a to i64 |
| %zext_b = zext i16 %b to i64 |
| %sum = add nuw nsw i64 %zext_a, %zext_b |
| ret i64 %sum |
| } |
| |
| ; Negative: Set bit 32 (2^32) of a to force a 64-bit addition; we do not truncate to a 32-bit addition in this case |
| define i64 @negative_test_add_i64_i16(i16 %a) nounwind { |
| ; X86-LABEL: negative_test_add_i64_i16: |
| ; X86: # %bb.0: |
| ; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: addl $42, %eax |
| ; X86-NEXT: movl $1, %edx |
| ; X86-NEXT: retl |
| ; |
| ; X64-LABEL: negative_test_add_i64_i16: |
| ; X64: # %bb.0: |
| ; X64-NEXT: movzwl %di, %ecx |
| ; X64-NEXT: movabsq $4294967338, %rax # imm = 0x10000002A |
| ; X64-NEXT: addq %rcx, %rax |
| ; X64-NEXT: retq |
| ; The or sets bit 32 (4294967296 = 2^32), so the value no longer fits in the |
| ; low 32 bits; the result's high half is non-zero and a plain 32-bit |
| ; narrowing of the add would be incorrect. |
| %zext_a = zext i16 %a to i64 |
| %or_a = or i64 %zext_a, 4294967296 |
| %sum = add nuw nsw i64 %or_a, 42 |
| ret i64 %sum |
| } |
| |
| ; Negative: We don't truncate to a 32-bit addition when the operands are sign-extended |
| define i64 @negative_test_add_i64_i16_sext(i16 %a, i16 %b) nounwind { |
| ; X86-LABEL: negative_test_add_i64_i16_sext: |
| ; X86: # %bb.0: |
| ; X86-NEXT: pushl %esi |
| ; X86-NEXT: movswl {{[0-9]+}}(%esp), %ecx |
| ; X86-NEXT: movl %ecx, %esi |
| ; X86-NEXT: sarl $31, %esi |
| ; X86-NEXT: movswl {{[0-9]+}}(%esp), %eax |
| ; X86-NEXT: movl %eax, %edx |
| ; X86-NEXT: sarl $31, %edx |
| ; X86-NEXT: addl %ecx, %eax |
| ; X86-NEXT: adcl %esi, %edx |
| ; X86-NEXT: popl %esi |
| ; X86-NEXT: retl |
| ; |
| ; X64-LABEL: negative_test_add_i64_i16_sext: |
| ; X64: # %bb.0: |
| ; X64-NEXT: # kill: def $esi killed $esi def $rsi |
| ; X64-NEXT: # kill: def $edi killed $edi def $rdi |
| ; X64-NEXT: movswq %di, %rcx |
| ; X64-NEXT: movswq %si, %rax |
| ; X64-NEXT: addq %rcx, %rax |
| ; X64-NEXT: retq |
| ; sext can set the upper 48 bits (for negative i16 inputs), so the high |
| ; halves participate in the sum: the 32-bit target must do a full 64-bit |
| ; add via an add/adc pair, as the checks above show. |
| %sext_a = sext i16 %a to i64 |
| %sext_b = sext i16 %b to i64 |
| %sum = add nuw nsw i64 %sext_a, %sext_b |
| ret i64 %sum |
| } |