blob: b8a3ad9e63fda2bb080c076f24148a2d29137a42 [file] [log] [blame] [edit]
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 6
; RUN: opt -passes=slp-vectorizer -S -mattr=+v -mtriple=riscv64-unknown-unknown < %s | FileCheck %s
define i32 @test(ptr %pix1, i32 %i_pix1, ptr %pix2, i32 %i_pix2) {
; CHECK-LABEL: define i32 @test(
; CHECK-SAME: ptr [[PIX1:%.*]], i32 [[I_PIX1:%.*]], ptr [[PIX2:%.*]], i32 [[I_PIX2:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: [[IDX_EXT:%.*]] = sext i32 [[I_PIX1]] to i64
; CHECK-NEXT: [[IDX_EXT31:%.*]] = sext i32 [[I_PIX2]] to i64
; CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds nuw i8, ptr [[PIX1]], i64 1
; CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds nuw i8, ptr [[PIX2]], i64 1
; CHECK-NEXT: [[ARRAYIDX10:%.*]] = getelementptr inbounds nuw i8, ptr [[PIX1]], i64 2
; CHECK-NEXT: [[ARRAYIDX12:%.*]] = getelementptr inbounds nuw i8, ptr [[PIX2]], i64 2
; CHECK-NEXT: [[ARRAYIDX15:%.*]] = getelementptr inbounds nuw i8, ptr [[PIX1]], i64 3
; CHECK-NEXT: [[ARRAYIDX17:%.*]] = getelementptr inbounds nuw i8, ptr [[PIX2]], i64 3
; CHECK-NEXT: [[TMP0:%.*]] = mul i64 [[IDX_EXT]], 1
; CHECK-NEXT: [[TMP1:%.*]] = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i64(ptr align 1 [[PIX1]], i64 [[TMP0]], <4 x i1> splat (i1 true), i32 4)
; CHECK-NEXT: [[TMP2:%.*]] = zext <4 x i8> [[TMP1]] to <4 x i32>
; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[IDX_EXT31]], 1
; CHECK-NEXT: [[TMP4:%.*]] = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i64(ptr align 1 [[PIX2]], i64 [[TMP3]], <4 x i1> splat (i1 true), i32 4)
; CHECK-NEXT: [[TMP5:%.*]] = zext <4 x i8> [[TMP4]] to <4 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = sub nsw <4 x i32> [[TMP2]], [[TMP5]]
; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[IDX_EXT]], 1
; CHECK-NEXT: [[TMP8:%.*]] = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i64(ptr align 1 [[ARRAYIDX3]], i64 [[TMP7]], <4 x i1> splat (i1 true), i32 4)
; CHECK-NEXT: [[TMP9:%.*]] = zext <4 x i8> [[TMP8]] to <4 x i32>
; CHECK-NEXT: [[TMP10:%.*]] = mul i64 [[IDX_EXT31]], 1
; CHECK-NEXT: [[TMP11:%.*]] = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i64(ptr align 1 [[ARRAYIDX5]], i64 [[TMP10]], <4 x i1> splat (i1 true), i32 4)
; CHECK-NEXT: [[TMP12:%.*]] = zext <4 x i8> [[TMP11]] to <4 x i32>
; CHECK-NEXT: [[TMP13:%.*]] = sub nsw <4 x i32> [[TMP9]], [[TMP12]]
; CHECK-NEXT: [[TMP14:%.*]] = add nsw <4 x i32> [[TMP13]], [[TMP6]]
; CHECK-NEXT: [[TMP15:%.*]] = sub nsw <4 x i32> [[TMP6]], [[TMP13]]
; CHECK-NEXT: [[TMP16:%.*]] = shl nsw <4 x i32> [[TMP15]], splat (i32 16)
; CHECK-NEXT: [[TMP17:%.*]] = add nsw <4 x i32> [[TMP14]], [[TMP16]]
; CHECK-NEXT: [[TMP18:%.*]] = mul i64 [[IDX_EXT]], 1
; CHECK-NEXT: [[TMP19:%.*]] = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i64(ptr align 1 [[ARRAYIDX10]], i64 [[TMP18]], <4 x i1> splat (i1 true), i32 4)
; CHECK-NEXT: [[TMP20:%.*]] = zext <4 x i8> [[TMP19]] to <4 x i32>
; CHECK-NEXT: [[TMP21:%.*]] = mul i64 [[IDX_EXT31]], 1
; CHECK-NEXT: [[TMP22:%.*]] = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i64(ptr align 1 [[ARRAYIDX12]], i64 [[TMP21]], <4 x i1> splat (i1 true), i32 4)
; CHECK-NEXT: [[TMP23:%.*]] = zext <4 x i8> [[TMP22]] to <4 x i32>
; CHECK-NEXT: [[TMP24:%.*]] = sub nsw <4 x i32> [[TMP20]], [[TMP23]]
; CHECK-NEXT: [[TMP25:%.*]] = mul i64 [[IDX_EXT]], 1
; CHECK-NEXT: [[TMP26:%.*]] = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i64(ptr align 1 [[ARRAYIDX15]], i64 [[TMP25]], <4 x i1> splat (i1 true), i32 4)
; CHECK-NEXT: [[TMP27:%.*]] = zext <4 x i8> [[TMP26]] to <4 x i32>
; CHECK-NEXT: [[TMP28:%.*]] = mul i64 [[IDX_EXT31]], 1
; CHECK-NEXT: [[TMP29:%.*]] = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0.i64(ptr align 1 [[ARRAYIDX17]], i64 [[TMP28]], <4 x i1> splat (i1 true), i32 4)
; CHECK-NEXT: [[TMP30:%.*]] = zext <4 x i8> [[TMP29]] to <4 x i32>
; CHECK-NEXT: [[TMP31:%.*]] = sub nsw <4 x i32> [[TMP27]], [[TMP30]]
; CHECK-NEXT: [[TMP32:%.*]] = add nsw <4 x i32> [[TMP31]], [[TMP24]]
; CHECK-NEXT: [[TMP33:%.*]] = sub nsw <4 x i32> [[TMP24]], [[TMP31]]
; CHECK-NEXT: [[TMP34:%.*]] = shl nsw <4 x i32> [[TMP33]], splat (i32 16)
; CHECK-NEXT: [[TMP35:%.*]] = add nsw <4 x i32> [[TMP32]], [[TMP34]]
; CHECK-NEXT: [[TMP36:%.*]] = sub nsw <4 x i32> [[TMP17]], [[TMP35]]
; CHECK-NEXT: [[TMP37:%.*]] = shufflevector <4 x i32> [[TMP36]], <4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
; CHECK-NEXT: [[TMP38:%.*]] = add nsw <4 x i32> [[TMP35]], [[TMP17]]
; CHECK-NEXT: [[TMP39:%.*]] = shufflevector <4 x i32> [[TMP38]], <4 x i32> poison, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
; CHECK-NEXT: [[TMP40:%.*]] = add nsw <4 x i32> [[TMP39]], [[TMP38]]
; CHECK-NEXT: [[TMP41:%.*]] = sub nsw <4 x i32> [[TMP39]], [[TMP38]]
; CHECK-NEXT: [[TMP42:%.*]] = shufflevector <4 x i32> [[TMP40]], <4 x i32> [[TMP41]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
; CHECK-NEXT: [[TMP43:%.*]] = shufflevector <4 x i32> [[TMP42]], <4 x i32> poison, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
; CHECK-NEXT: [[TMP44:%.*]] = add nsw <4 x i32> [[TMP42]], [[TMP43]]
; CHECK-NEXT: [[TMP45:%.*]] = sub nsw <4 x i32> [[TMP42]], [[TMP43]]
; CHECK-NEXT: [[TMP46:%.*]] = shufflevector <4 x i32> [[TMP44]], <4 x i32> [[TMP45]], <4 x i32> <i32 4, i32 5, i32 2, i32 3>
; CHECK-NEXT: [[TMP47:%.*]] = lshr <4 x i32> [[TMP46]], splat (i32 15)
; CHECK-NEXT: [[TMP48:%.*]] = and <4 x i32> [[TMP47]], splat (i32 65537)
; CHECK-NEXT: [[TMP49:%.*]] = mul nuw <4 x i32> [[TMP48]], splat (i32 65535)
; CHECK-NEXT: [[TMP50:%.*]] = add <4 x i32> [[TMP49]], [[TMP46]]
; CHECK-NEXT: [[TMP117:%.*]] = xor <4 x i32> [[TMP50]], [[TMP49]]
; CHECK-NEXT: [[TMP118:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP117]])
; CHECK-NEXT: [[CONV78:%.*]] = and i32 [[TMP118]], 65535
; CHECK-NEXT: [[SHR:%.*]] = lshr i32 [[TMP118]], 16
; CHECK-NEXT: [[ADD80:%.*]] = add nuw nsw i32 [[SHR]], [[CONV78]]
; CHECK-NEXT: [[TMP53:%.*]] = add nsw <4 x i32> [[TMP37]], [[TMP36]]
; CHECK-NEXT: [[TMP54:%.*]] = sub nsw <4 x i32> [[TMP37]], [[TMP36]]
; CHECK-NEXT: [[TMP55:%.*]] = shufflevector <4 x i32> [[TMP53]], <4 x i32> [[TMP54]], <4 x i32> <i32 0, i32 5, i32 2, i32 7>
; CHECK-NEXT: [[TMP56:%.*]] = shufflevector <4 x i32> [[TMP55]], <4 x i32> poison, <4 x i32> <i32 2, i32 3, i32 0, i32 1>
; CHECK-NEXT: [[TMP57:%.*]] = add nsw <4 x i32> [[TMP55]], [[TMP56]]
; CHECK-NEXT: [[TMP58:%.*]] = sub nsw <4 x i32> [[TMP55]], [[TMP56]]
; CHECK-NEXT: [[TMP59:%.*]] = shufflevector <4 x i32> [[TMP57]], <4 x i32> [[TMP58]], <4 x i32> <i32 4, i32 5, i32 2, i32 3>
; CHECK-NEXT: [[TMP60:%.*]] = lshr <4 x i32> [[TMP59]], splat (i32 15)
; CHECK-NEXT: [[TMP61:%.*]] = and <4 x i32> [[TMP60]], splat (i32 65537)
; CHECK-NEXT: [[TMP62:%.*]] = mul nuw <4 x i32> [[TMP61]], splat (i32 65535)
; CHECK-NEXT: [[TMP63:%.*]] = add <4 x i32> [[TMP62]], [[TMP59]]
; CHECK-NEXT: [[TMP64:%.*]] = xor <4 x i32> [[TMP63]], [[TMP62]]
; CHECK-NEXT: [[TMP65:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[TMP64]])
; CHECK-NEXT: [[CONV78_1:%.*]] = and i32 [[TMP65]], 65535
; CHECK-NEXT: [[SHR_1:%.*]] = lshr i32 [[TMP65]], 16
; CHECK-NEXT: [[ADD79_1:%.*]] = add nuw nsw i32 [[SHR_1]], [[ADD80]]
; CHECK-NEXT: [[ADD80_1:%.*]] = add nuw nsw i32 [[ADD79_1]], [[CONV78_1]]
; CHECK-NEXT: [[SHR84:%.*]] = lshr i32 [[ADD80_1]], 1
; CHECK-NEXT: ret i32 [[SHR84]]
;
entry:
%idx.ext = sext i32 %i_pix1 to i64
%idx.ext31 = sext i32 %i_pix2 to i64
%0 = load i8, ptr %pix1, align 1
%conv = zext i8 %0 to i32
%1 = load i8, ptr %pix2, align 1
%conv2 = zext i8 %1 to i32
%sub = sub nsw i32 %conv, %conv2
%arrayidx3 = getelementptr inbounds nuw i8, ptr %pix1, i64 1
%2 = load i8, ptr %arrayidx3, align 1
%conv4 = zext i8 %2 to i32
%arrayidx5 = getelementptr inbounds nuw i8, ptr %pix2, i64 1
%3 = load i8, ptr %arrayidx5, align 1
%conv6 = zext i8 %3 to i32
%sub7 = sub nsw i32 %conv4, %conv6
%add = add nsw i32 %sub7, %sub
%sub8 = sub nsw i32 %sub, %sub7
%shl = shl nsw i32 %sub8, 16
%add9 = add nsw i32 %add, %shl
%arrayidx10 = getelementptr inbounds nuw i8, ptr %pix1, i64 2
%4 = load i8, ptr %arrayidx10, align 1
%conv11 = zext i8 %4 to i32
%arrayidx12 = getelementptr inbounds nuw i8, ptr %pix2, i64 2
%5 = load i8, ptr %arrayidx12, align 1
%conv13 = zext i8 %5 to i32
%sub14 = sub nsw i32 %conv11, %conv13
%arrayidx15 = getelementptr inbounds nuw i8, ptr %pix1, i64 3
%6 = load i8, ptr %arrayidx15, align 1
%conv16 = zext i8 %6 to i32
%arrayidx17 = getelementptr inbounds nuw i8, ptr %pix2, i64 3
%7 = load i8, ptr %arrayidx17, align 1
%conv18 = zext i8 %7 to i32
%sub19 = sub nsw i32 %conv16, %conv18
%add20 = add nsw i32 %sub19, %sub14
%sub21 = sub nsw i32 %sub14, %sub19
%shl22 = shl nsw i32 %sub21, 16
%add23 = add nsw i32 %add20, %shl22
%add24 = add nsw i32 %add23, %add9
%sub27 = sub nsw i32 %add9, %add23
%add.ptr = getelementptr inbounds i8, ptr %pix1, i64 %idx.ext
%add.ptr32 = getelementptr inbounds i8, ptr %pix2, i64 %idx.ext31
%8 = load i8, ptr %add.ptr, align 1
%conv.1 = zext i8 %8 to i32
%9 = load i8, ptr %add.ptr32, align 1
%conv2.1 = zext i8 %9 to i32
%sub.1 = sub nsw i32 %conv.1, %conv2.1
%arrayidx3.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 1
%10 = load i8, ptr %arrayidx3.1, align 1
%conv4.1 = zext i8 %10 to i32
%arrayidx5.1 = getelementptr inbounds nuw i8, ptr %add.ptr32, i64 1
%11 = load i8, ptr %arrayidx5.1, align 1
%conv6.1 = zext i8 %11 to i32
%sub7.1 = sub nsw i32 %conv4.1, %conv6.1
%add.1 = add nsw i32 %sub7.1, %sub.1
%sub8.1 = sub nsw i32 %sub.1, %sub7.1
%shl.1 = shl nsw i32 %sub8.1, 16
%add9.1 = add nsw i32 %add.1, %shl.1
%arrayidx10.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 2
%12 = load i8, ptr %arrayidx10.1, align 1
%conv11.1 = zext i8 %12 to i32
%arrayidx12.1 = getelementptr inbounds nuw i8, ptr %add.ptr32, i64 2
%13 = load i8, ptr %arrayidx12.1, align 1
%conv13.1 = zext i8 %13 to i32
%sub14.1 = sub nsw i32 %conv11.1, %conv13.1
%arrayidx15.1 = getelementptr inbounds nuw i8, ptr %add.ptr, i64 3
%14 = load i8, ptr %arrayidx15.1, align 1
%conv16.1 = zext i8 %14 to i32
%arrayidx17.1 = getelementptr inbounds nuw i8, ptr %add.ptr32, i64 3
%15 = load i8, ptr %arrayidx17.1, align 1
%conv18.1 = zext i8 %15 to i32
%sub19.1 = sub nsw i32 %conv16.1, %conv18.1
%add20.1 = add nsw i32 %sub19.1, %sub14.1
%sub21.1 = sub nsw i32 %sub14.1, %sub19.1
%shl22.1 = shl nsw i32 %sub21.1, 16
%add23.1 = add nsw i32 %add20.1, %shl22.1
%add24.1 = add nsw i32 %add23.1, %add9.1
%sub27.1 = sub nsw i32 %add9.1, %add23.1
%add.ptr.1 = getelementptr inbounds i8, ptr %add.ptr, i64 %idx.ext
%add.ptr32.1 = getelementptr inbounds i8, ptr %add.ptr32, i64 %idx.ext31
%16 = load i8, ptr %add.ptr.1, align 1
%conv.2 = zext i8 %16 to i32
%17 = load i8, ptr %add.ptr32.1, align 1
%conv2.2 = zext i8 %17 to i32
%sub.2 = sub nsw i32 %conv.2, %conv2.2
%arrayidx3.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 1
%18 = load i8, ptr %arrayidx3.2, align 1
%conv4.2 = zext i8 %18 to i32
%arrayidx5.2 = getelementptr inbounds nuw i8, ptr %add.ptr32.1, i64 1
%19 = load i8, ptr %arrayidx5.2, align 1
%conv6.2 = zext i8 %19 to i32
%sub7.2 = sub nsw i32 %conv4.2, %conv6.2
%add.2 = add nsw i32 %sub7.2, %sub.2
%sub8.2 = sub nsw i32 %sub.2, %sub7.2
%shl.2 = shl nsw i32 %sub8.2, 16
%add9.2 = add nsw i32 %add.2, %shl.2
%arrayidx10.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 2
%20 = load i8, ptr %arrayidx10.2, align 1
%conv11.2 = zext i8 %20 to i32
%arrayidx12.2 = getelementptr inbounds nuw i8, ptr %add.ptr32.1, i64 2
%21 = load i8, ptr %arrayidx12.2, align 1
%conv13.2 = zext i8 %21 to i32
%sub14.2 = sub nsw i32 %conv11.2, %conv13.2
%arrayidx15.2 = getelementptr inbounds nuw i8, ptr %add.ptr.1, i64 3
%22 = load i8, ptr %arrayidx15.2, align 1
%conv16.2 = zext i8 %22 to i32
%arrayidx17.2 = getelementptr inbounds nuw i8, ptr %add.ptr32.1, i64 3
%23 = load i8, ptr %arrayidx17.2, align 1
%conv18.2 = zext i8 %23 to i32
%sub19.2 = sub nsw i32 %conv16.2, %conv18.2
%add20.2 = add nsw i32 %sub19.2, %sub14.2
%sub21.2 = sub nsw i32 %sub14.2, %sub19.2
%shl22.2 = shl nsw i32 %sub21.2, 16
%add23.2 = add nsw i32 %add20.2, %shl22.2
%add24.2 = add nsw i32 %add23.2, %add9.2
%sub27.2 = sub nsw i32 %add9.2, %add23.2
%add.ptr.2 = getelementptr inbounds i8, ptr %add.ptr.1, i64 %idx.ext
%add.ptr32.2 = getelementptr inbounds i8, ptr %add.ptr32.1, i64 %idx.ext31
%24 = load i8, ptr %add.ptr.2, align 1
%conv.3 = zext i8 %24 to i32
%25 = load i8, ptr %add.ptr32.2, align 1
%conv2.3 = zext i8 %25 to i32
%sub.3 = sub nsw i32 %conv.3, %conv2.3
%arrayidx3.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 1
%26 = load i8, ptr %arrayidx3.3, align 1
%conv4.3 = zext i8 %26 to i32
%arrayidx5.3 = getelementptr inbounds nuw i8, ptr %add.ptr32.2, i64 1
%27 = load i8, ptr %arrayidx5.3, align 1
%conv6.3 = zext i8 %27 to i32
%sub7.3 = sub nsw i32 %conv4.3, %conv6.3
%add.3 = add nsw i32 %sub7.3, %sub.3
%sub8.3 = sub nsw i32 %sub.3, %sub7.3
%shl.3 = shl nsw i32 %sub8.3, 16
%add9.3 = add nsw i32 %add.3, %shl.3
%arrayidx10.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 2
%28 = load i8, ptr %arrayidx10.3, align 1
%conv11.3 = zext i8 %28 to i32
%arrayidx12.3 = getelementptr inbounds nuw i8, ptr %add.ptr32.2, i64 2
%29 = load i8, ptr %arrayidx12.3, align 1
%conv13.3 = zext i8 %29 to i32
%sub14.3 = sub nsw i32 %conv11.3, %conv13.3
%arrayidx15.3 = getelementptr inbounds nuw i8, ptr %add.ptr.2, i64 3
%30 = load i8, ptr %arrayidx15.3, align 1
%conv16.3 = zext i8 %30 to i32
%arrayidx17.3 = getelementptr inbounds nuw i8, ptr %add.ptr32.2, i64 3
%31 = load i8, ptr %arrayidx17.3, align 1
%conv18.3 = zext i8 %31 to i32
%sub19.3 = sub nsw i32 %conv16.3, %conv18.3
%add20.3 = add nsw i32 %sub19.3, %sub14.3
%sub21.3 = sub nsw i32 %sub14.3, %sub19.3
%shl22.3 = shl nsw i32 %sub21.3, 16
%add23.3 = add nsw i32 %add20.3, %shl22.3
%add24.3 = add nsw i32 %add23.3, %add9.3
%sub27.3 = sub nsw i32 %add9.3, %add23.3
%add45 = add nsw i32 %add24.1, %add24
%sub52 = sub nsw i32 %add24, %add24.1
%add59 = add nsw i32 %add24.3, %add24.2
%sub66 = sub nsw i32 %add24.2, %add24.3
%add67 = add nsw i32 %add59, %add45
%sub68 = sub nsw i32 %add45, %add59
%add69 = add nsw i32 %sub66, %sub52
%sub70 = sub nsw i32 %sub52, %sub66
%shr.i = lshr i32 %add67, 15
%and.i = and i32 %shr.i, 65537
%mul.i = mul nuw i32 %and.i, 65535
%add.i = add i32 %mul.i, %add67
%xor.i = xor i32 %add.i, %mul.i
%shr.i122 = lshr i32 %add69, 15
%and.i123 = and i32 %shr.i122, 65537
%mul.i124 = mul nuw i32 %and.i123, 65535
%add.i125 = add i32 %mul.i124, %add69
%xor.i126 = xor i32 %add.i125, %mul.i124
%add72 = add i32 %xor.i, %xor.i126
%shr.i127 = lshr i32 %sub68, 15
%and.i128 = and i32 %shr.i127, 65537
%mul.i129 = mul nuw i32 %and.i128, 65535
%add.i130 = add i32 %mul.i129, %sub68
%xor.i131 = xor i32 %add.i130, %mul.i129
%add74 = add i32 %add72, %xor.i131
%shr.i132 = lshr i32 %sub70, 15
%and.i133 = and i32 %shr.i132, 65537
%mul.i134 = mul nuw i32 %and.i133, 65535
%add.i135 = add i32 %mul.i134, %sub70
%xor.i136 = xor i32 %add.i135, %mul.i134
%add76 = add i32 %add74, %xor.i136
%conv78 = and i32 %add76, 65535
%shr = lshr i32 %add76, 16
%add80 = add nuw nsw i32 %shr, %conv78
%add45.1 = add nsw i32 %sub27.1, %sub27
%sub52.1 = sub nsw i32 %sub27, %sub27.1
%add59.1 = add nsw i32 %sub27.3, %sub27.2
%sub66.1 = sub nsw i32 %sub27.2, %sub27.3
%add67.1 = add nsw i32 %add59.1, %add45.1
%sub68.1 = sub nsw i32 %add45.1, %add59.1
%add69.1 = add nsw i32 %sub66.1, %sub52.1
%sub70.1 = sub nsw i32 %sub52.1, %sub66.1
%shr.i.1 = lshr i32 %add67.1, 15
%and.i.1 = and i32 %shr.i.1, 65537
%mul.i.1 = mul nuw i32 %and.i.1, 65535
%add.i.1 = add i32 %mul.i.1, %add67.1
%xor.i.1 = xor i32 %add.i.1, %mul.i.1
%shr.i122.1 = lshr i32 %add69.1, 15
%and.i123.1 = and i32 %shr.i122.1, 65537
%mul.i124.1 = mul nuw i32 %and.i123.1, 65535
%add.i125.1 = add i32 %mul.i124.1, %add69.1
%xor.i126.1 = xor i32 %add.i125.1, %mul.i124.1
%add72.1 = add i32 %xor.i.1, %xor.i126.1
%shr.i127.1 = lshr i32 %sub68.1, 15
%and.i128.1 = and i32 %shr.i127.1, 65537
%mul.i129.1 = mul nuw i32 %and.i128.1, 65535
%add.i130.1 = add i32 %mul.i129.1, %sub68.1
%xor.i131.1 = xor i32 %add.i130.1, %mul.i129.1
%add74.1 = add i32 %add72.1, %xor.i131.1
%shr.i132.1 = lshr i32 %sub70.1, 15
%and.i133.1 = and i32 %shr.i132.1, 65537
%mul.i134.1 = mul nuw i32 %and.i133.1, 65535
%add.i135.1 = add i32 %mul.i134.1, %sub70.1
%xor.i136.1 = xor i32 %add.i135.1, %mul.i134.1
%add76.1 = add i32 %add74.1, %xor.i136.1
%conv78.1 = and i32 %add76.1, 65535
%shr.1 = lshr i32 %add76.1, 16
%add79.1 = add nuw nsw i32 %shr.1, %add80
%add80.1 = add nuw nsw i32 %add79.1, %conv78.1
%shr84 = lshr i32 %add80.1, 1
ret i32 %shr84
}