; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=SI -check-prefix=FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -enable-var-scope -check-prefix=EG -check-prefix=FUNC %s

; FUNC-LABEL: {{^}}xor_v2i32:
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define amdgpu_kernel void @xor_v2i32(ptr addrspace(1) %out, ptr addrspace(1) %in0, ptr addrspace(1) %in1) {
  %a = load <2 x i32>, ptr addrspace(1) %in0
  %b = load <2 x i32>, ptr addrspace(1) %in1
  %result = xor <2 x i32> %a, %b
  store <2 x i32> %result, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}xor_v4i32:
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}
; SI: v_xor_b32_e32 {{v[0-9]+, v[0-9]+, v[0-9]+}}

define amdgpu_kernel void @xor_v4i32(ptr addrspace(1) %out, ptr addrspace(1) %in0, ptr addrspace(1) %in1) {
  %a = load <4 x i32>, ptr addrspace(1) %in0
  %b = load <4 x i32>, ptr addrspace(1) %in1
  %result = xor <4 x i32> %a, %b
  store <4 x i32> %result, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}xor_i1:
; EG: XOR_INT {{\** *}}{{T[0-9]+\.[XYZW]}}, {{PS|PV\.[XYZW]}}, {{PS|PV\.[XYZW]}}

; SI-DAG: v_cmp_le_f32_e32 [[CMP0:vcc]], 1.0, {{v[0-9]+}}
; SI-DAG: v_cmp_le_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 0, {{v[0-9]+}}
; SI: s_xor_b64 [[XOR:vcc]], [[CMP1]], [[CMP0]]
; SI: v_cndmask_b32_e32 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}
; SI: buffer_store_dword [[RESULT]]
; SI: s_endpgm
define amdgpu_kernel void @xor_i1(ptr addrspace(1) %out, ptr addrspace(1) %in0, ptr addrspace(1) %in1) {
  %a = load float, ptr addrspace(1) %in0
  %b = load float, ptr addrspace(1) %in1
  %acmp = fcmp oge float %a, 0.000000e+00
  %bcmp = fcmp oge float %b, 1.000000e+00
  %xor = xor i1 %acmp, %bcmp
  %result = select i1 %xor, float %a, float %b
  store float %result, ptr addrspace(1) %out
  ret void
}

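; In-memory i1 values are byte-sized: both inputs are loaded with
; buffer_load_ubyte, xored in a VGPR, and the result is re-normalized to 0/1
; with an AND against 1 before the byte store.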
; FUNC-LABEL: {{^}}v_xor_i1:
; SI: buffer_load_ubyte [[B:v[0-9]+]]
; SI: buffer_load_ubyte [[A:v[0-9]+]]
; SI: v_xor_b32_e32 [[XOR:v[0-9]+]], [[B]], [[A]]
; SI: v_and_b32_e32 [[RESULT:v[0-9]+]], 1, [[XOR]]
; SI: buffer_store_byte [[RESULT]]
define amdgpu_kernel void @v_xor_i1(ptr addrspace(1) %out, ptr addrspace(1) %in0, ptr addrspace(1) %in1) {
  %a = load volatile i1, ptr addrspace(1) %in0
  %b = load volatile i1, ptr addrspace(1) %in1
  %xor = xor i1 %a, %b
  store i1 %xor, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}vector_xor_i32:
; SI: v_xor_b32_e32
define amdgpu_kernel void @vector_xor_i32(ptr addrspace(1) %out, ptr addrspace(1) %in0, ptr addrspace(1) %in1) {
  %a = load i32, ptr addrspace(1) %in0
  %b = load i32, ptr addrspace(1) %in1
  %result = xor i32 %a, %b
  store i32 %result, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}scalar_xor_i32:
; SI: s_xor_b32
define amdgpu_kernel void @scalar_xor_i32(ptr addrspace(1) %out, i32 %a, i32 %b) {
  %result = xor i32 %a, %b
  store i32 %result, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}scalar_not_i32:
; SI: s_not_b32
define amdgpu_kernel void @scalar_not_i32(ptr addrspace(1) %out, i32 %a) {
  %result = xor i32 %a, -1
  store i32 %result, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}vector_not_i32:
; SI: v_not_b32
define amdgpu_kernel void @vector_not_i32(ptr addrspace(1) %out, ptr addrspace(1) %in0, ptr addrspace(1) %in1) {
  %a = load i32, ptr addrspace(1) %in0
  %b = load i32, ptr addrspace(1) %in1
  %result = xor i32 %a, -1
  store i32 %result, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}vector_xor_i64:
; SI: v_xor_b32_e32
; SI: v_xor_b32_e32
; SI: s_endpgm
define amdgpu_kernel void @vector_xor_i64(ptr addrspace(1) %out, ptr addrspace(1) %in0, ptr addrspace(1) %in1) {
  %a = load i64, ptr addrspace(1) %in0
  %b = load i64, ptr addrspace(1) %in1
  %result = xor i64 %a, %b
  store i64 %result, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}scalar_xor_i64:
; SI: s_xor_b64
; SI: s_endpgm
define amdgpu_kernel void @scalar_xor_i64(ptr addrspace(1) %out, i64 %a, i64 %b) {
  %result = xor i64 %a, %b
  store i64 %result, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}scalar_not_i64:
; SI: s_not_b64
define amdgpu_kernel void @scalar_not_i64(ptr addrspace(1) %out, i64 %a) {
  %result = xor i64 %a, -1
  store i64 %result, ptr addrspace(1) %out
  ret void
}

; FUNC-LABEL: {{^}}vector_not_i64:
; SI: v_not_b32
; SI: v_not_b32
define amdgpu_kernel void @vector_not_i64(ptr addrspace(1) %out, ptr addrspace(1) %in0, ptr addrspace(1) %in1) {
  %a = load i64, ptr addrspace(1) %in0
  %b = load i64, ptr addrspace(1) %in1
  %result = xor i64 %a, -1
  store i64 %result, ptr addrspace(1) %out
  ret void
}

; Test that we have a pattern to match xor inside a branch.
; Note that in the future the backend may be smart enough to
; use an SALU instruction for this.

; FUNC-LABEL: {{^}}xor_cf:
; SI: s_xor_b64
define amdgpu_kernel void @xor_cf(ptr addrspace(1) %out, ptr addrspace(1) %in, i64 %a, i64 %b) {
entry:
  %0 = icmp eq i64 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = xor i64 %a, %b
  br label %endif

else:
  %2 = load i64, ptr addrspace(1) %in
  br label %endif

endif:
  %3 = phi i64 [%1, %if], [%2, %else]
  store i64 %3, ptr addrspace(1) %out
  ret void
}

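; 4261135838621753 is 0x000f237b00003039, so the scalar xor of the 64-bit
; literal splits into two 32-bit xors: 0x3039 against the low half and
; 0xf237b against the high half.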
; FUNC-LABEL: {{^}}scalar_xor_literal_i64:
; SI: s_load_dwordx2 s[[[LO:[0-9]+]]:[[HI:[0-9]+]]], s{{\[[0-9]+:[0-9]+\]}}, {{0x9|0x24}}
; SI-DAG: s_xor_b32 s[[RES_HI:[0-9]+]], s{{[0-9]+}}, 0xf237b
; SI-DAG: s_xor_b32 s[[RES_LO:[0-9]+]], s{{[0-9]+}}, 0x3039
; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[RES_LO]]
; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s[[RES_HI]]
define amdgpu_kernel void @scalar_xor_literal_i64(ptr addrspace(1) %out, [8 x i32], i64 %a) {
  %or = xor i64 %a, 4261135838621753
  store i64 %or, ptr addrspace(1) %out
  ret void
}

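; When the same 64-bit literal has multiple uses, the constant is
; materialized once into an SGPR pair (s_movk_i32/s_mov_b32) and the xor is
; kept as a single s_xor_b64; the add still encodes the two 32-bit halves as
; literal operands.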
; FUNC-LABEL: {{^}}scalar_xor_literal_multi_use_i64:
; SI: s_load_dwordx4 s[[[LO:[0-9]+]]:[[HI:[0-9]+]]], s{{\[[0-9]+:[0-9]+\]}}, {{0x13|0x4c}}
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0xf237b
; SI-DAG: s_movk_i32 s[[K_LO:[0-9]+]], 0x3039
; SI: s_xor_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s[[[K_LO]]:[[K_HI]]]

; SI: s_add_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0x3039
; SI: s_addc_u32 s{{[0-9]+}}, s{{[0-9]+}}, 0xf237b
define amdgpu_kernel void @scalar_xor_literal_multi_use_i64(ptr addrspace(1) %out, [8 x i32], i64 %a, i64 %b) {
  %or = xor i64 %a, 4261135838621753
  store i64 %or, ptr addrspace(1) %out

  %foo = add i64 %b, 4261135838621753
  store volatile i64 %foo, ptr addrspace(1) undef
  ret void
}

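; 63 is in the inline-immediate range (-16..64) and the high half of the
; constant is 0, so only the low dword needs an s_xor_b32.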
; FUNC-LABEL: {{^}}scalar_xor_inline_imm_i64:
; SI: s_load_dwordx2 s[[[VAL_LO:[0-9]+]]:[[VAL_HI:[0-9]+]]], s{{\[[0-9]+:[0-9]+\]}}, {{0x13|0x4c}}
; SI-NOT: xor_b32
; SI: s_xor_b32 s[[VAL_LO]], s{{[0-9]+}}, 63
; SI-NOT: xor_b32
; SI: v_mov_b32_e32 v[[VLO:[0-9]+]], s{{[0-9]+}}
; SI-NOT: xor_b32
; SI: v_mov_b32_e32 v[[VHI:[0-9]+]], s{{[0-9]+}}
; SI-NOT: xor_b32
; SI: buffer_store_dwordx2 v[[[VLO]]:[[VHI]]]
define amdgpu_kernel void @scalar_xor_inline_imm_i64(ptr addrspace(1) %out, [8 x i32], i64 %a) {
  %or = xor i64 %a, 63
  store i64 %or, ptr addrspace(1) %out
  ret void
}

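; -8 sign-extends to a valid 64-bit inline immediate, so a single s_xor_b64
; with an immediate operand suffices.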
; FUNC-LABEL: {{^}}scalar_xor_neg_inline_imm_i64:
; SI: s_load_dwordx2 [[VAL:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, {{0x13|0x4c}}
; SI: s_xor_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -8
define amdgpu_kernel void @scalar_xor_neg_inline_imm_i64(ptr addrspace(1) %out, [8 x i32], i64 %a) {
  %or = xor i64 %a, -8
  store i64 %or, ptr addrspace(1) %out
  ret void
}

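; On the VALU the 64-bit xor is split per dword: as i64, -8 is
; 0xfffffffffffffff8, so the low dword is xored with -8 and the high dword
; with -1, both inline immediates.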
; FUNC-LABEL: {{^}}vector_xor_i64_neg_inline_imm:
; SI: buffer_load_dwordx2 v[[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]],
; SI: v_xor_b32_e32 {{v[0-9]+}}, -8, v[[LO_VREG]]
; SI: v_xor_b32_e32 {{v[0-9]+}}, -1, {{.*}}
; SI: s_endpgm
define amdgpu_kernel void @vector_xor_i64_neg_inline_imm(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b) {
  %loada = load i64, ptr addrspace(1) %a, align 8
  %or = xor i64 %loada, -8
  store i64 %or, ptr addrspace(1) %out
  ret void
}

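; 22470723082367 is 0x146fdf77987f; neither 32-bit half fits the
; inline-immediate range, so both halves are encoded as VOP literals
; (0xdf77987f low, 0x146f high).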
; FUNC-LABEL: {{^}}vector_xor_literal_i64:
; SI-DAG: buffer_load_dwordx2 v[[[LO_VREG:[0-9]+]]:[[HI_VREG:[0-9]+]]],
; SI-DAG: v_xor_b32_e32 {{v[0-9]+}}, 0xdf77987f, v[[LO_VREG]]
; SI-DAG: v_xor_b32_e32 {{v[0-9]+}}, 0x146f, v[[HI_VREG]]
; SI: s_endpgm
define amdgpu_kernel void @vector_xor_literal_i64(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b) {
  %loada = load i64, ptr addrspace(1) %a, align 8
  %or = xor i64 %loada, 22470723082367
  store i64 %or, ptr addrspace(1) %out
  ret void
}