; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --version 6
; RUN: opt --mattr=+neon,+dotprod -passes=loop-vectorize -force-vector-interleave=1 -enable-epilogue-vectorization=false -S %s | FileCheck %s
; RUN: opt --mattr=+neon,+dotprod,-sve -passes=loop-vectorize -force-vector-interleave=1 -enable-epilogue-vectorization=false -S %s | FileCheck %s --check-prefix=CHECK-NEON
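; The first run uses the default CHECK prefix. The second run additionally
; disables SVE so that the fixed-width CHECK-NEON assertions for
; @chained_sext_adds, which carries the SVE-capable "grace" target-cpu
; attribute, are exercised.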

target triple = "arm64-apple-macosx"

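; The zext'd load feeds an add whose result is then added to the loop-invariant
; %offset, so the extended-add chain is incomplete; the checks below expect a
; plain <16 x i32> add reduction instead of a partial reduction.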
define i32 @red_extended_add_incomplete_chain(ptr %start, ptr %end, i32 %offset) {
; CHECK-LABEL: define i32 @red_extended_add_incomplete_chain(
; CHECK-SAME: ptr [[START:%.*]], ptr [[END:%.*]], i32 [[OFFSET:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64
; CHECK-NEXT: [[END1:%.*]] = ptrtoint ptr [[END]] to i64
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[END1]], 1
; CHECK-NEXT: [[TMP1:%.*]] = sub i64 [[TMP0]], [[START2]]
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 16
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 16
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[START]], i64 [[N_VEC]]
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[OFFSET]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP5:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[NEXT_GEP]], align 1
; CHECK-NEXT: [[TMP3:%.*]] = zext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEXT: [[TMP4:%.*]] = add <16 x i32> [[VEC_PHI]], [[TMP3]]
; CHECK-NEXT: [[TMP5]] = add <16 x i32> [[TMP4]], [[BROADCAST_SPLAT]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP7:%.*]] = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> [[TMP5]])
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[TMP2]], %[[MIDDLE_BLOCK]] ], [ [[START]], %[[ENTRY]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP7]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[PTR_IV:%.*]] = phi ptr [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[GEP_IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[RED_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[PTR_IV]], align 1
; CHECK-NEXT: [[L_EXT:%.*]] = zext i8 [[L]] to i32
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[RED]], [[L_EXT]]
; CHECK-NEXT: [[RED_NEXT]] = add i32 [[ADD]], [[OFFSET]]
; CHECK-NEXT: [[GEP_IV_NEXT]] = getelementptr i8, ptr [[PTR_IV]], i64 1
; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[PTR_IV]], [[END]]
; CHECK-NEXT: br i1 [[EC]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: [[RED_NEXT_LCSSA:%.*]] = phi i32 [ [[RED_NEXT]], %[[LOOP]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
; CHECK-NEXT: ret i32 [[RED_NEXT_LCSSA]]
;
entry:
  br label %loop

loop:
  %ptr.iv = phi ptr [ %start, %entry ], [ %gep.iv.next, %loop ]
  %red = phi i32 [ 0, %entry ], [ %red.next, %loop ]
  %l = load i8, ptr %ptr.iv, align 1
  %l.ext = zext i8 %l to i32
  %add = add i32 %red, %l.ext
  %red.next = add i32 %add, %offset
  %gep.iv.next = getelementptr i8, ptr %ptr.iv, i64 1
  %ec = icmp eq ptr %ptr.iv, %end
  br i1 %ec, label %exit, label %loop

exit:
  ret i32 %red.next
}

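; The i16 reduction adds two different zext'd uniform loads and there is no
; multiply, so no complete extended-multiply-add chain can be formed; the
; checks below expect plain <16 x i16> adds of the broadcast values.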
define i16 @test_incomplete_chain_without_mul(ptr noalias %dst, ptr %A, ptr %B) #0 {
; CHECK-LABEL: define i16 @test_incomplete_chain_without_mul(
; CHECK-SAME: ptr noalias [[DST:%.*]], ptr [[A:%.*]], ptr [[B:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT: [[ENTRY:.*:]]
; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i16> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr [[A]], align 1
; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i8> poison, i8 [[TMP0]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT]], <16 x i8> poison, <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP1:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT]] to <16 x i16>
; CHECK-NEXT: [[TMP2:%.*]] = extractelement <16 x i16> [[TMP1]], i32 15
; CHECK-NEXT: store i16 [[TMP2]], ptr [[DST]], align 2
; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[B]], align 1
; CHECK-NEXT: [[BROADCAST_SPLATINSERT1:%.*]] = insertelement <16 x i8> poison, i8 [[TMP3]], i64 0
; CHECK-NEXT: [[BROADCAST_SPLAT2:%.*]] = shufflevector <16 x i8> [[BROADCAST_SPLATINSERT1]], <16 x i8> poison, <16 x i32> zeroinitializer
; CHECK-NEXT: [[TMP4:%.*]] = zext <16 x i8> [[BROADCAST_SPLAT2]] to <16 x i16>
; CHECK-NEXT: [[TMP5:%.*]] = add <16 x i16> [[VEC_PHI]], [[TMP4]]
; CHECK-NEXT: [[TMP6:%.*]] = add <16 x i16> [[TMP5]], [[TMP1]]
; CHECK-NEXT: [[TMP7]] = add <16 x i16> [[TMP6]], [[TMP4]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP9:%.*]] = call i16 @llvm.vector.reduce.add.v16i16(<16 x i16> [[TMP7]])
; CHECK-NEXT: br label %[[EXIT:.*]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret i16 [[TMP9]]
;
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %red = phi i16 [ 0, %entry ], [ %red.next, %loop ]
  %l.a = load i8, ptr %A, align 1
  %a.ext = zext i8 %l.a to i16
  store i16 %a.ext, ptr %dst, align 2
  %l.b = load i8, ptr %B, align 1
  %b.ext = zext i8 %l.b to i16
  %add = add i16 %red, %b.ext
  %add.1 = add i16 %add, %a.ext
  %red.next = add i16 %add.1, %b.ext
  %iv.next = add i64 %iv, 1
  %ec = icmp ult i64 %iv.next, 1024
  br i1 %ec, label %loop, label %exit

exit:
  ret i16 %red.next
}

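; The same sext'd load is added to the reduction twice. The CHECK-NEON run
; below chains two @llvm.vector.partial.reduce.add calls, while the default
; run vectorizes the chain with scalable wide adds.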
define void @chained_sext_adds(ptr noalias %src, ptr noalias %dst) #0 {
; CHECK-NEON-LABEL: define void @chained_sext_adds(
; CHECK-NEON-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) #[[ATTR1:[0-9]+]] {
; CHECK-NEON-NEXT: [[ENTRY:.*:]]
; CHECK-NEON-NEXT: br label %[[VECTOR_PH:.*]]
; CHECK-NEON: [[VECTOR_PH]]:
; CHECK-NEON-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK-NEON: [[VECTOR_BODY]]:
; CHECK-NEON-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEON-NEXT: [[VEC_PHI:%.*]] = phi <4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[PARTIAL_REDUCE1:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEON-NEXT: [[TMP0:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEON-NEXT: [[WIDE_LOAD:%.*]] = load <16 x i8>, ptr [[TMP0]], align 1
; CHECK-NEON-NEXT: [[TMP1:%.*]] = sext <16 x i8> [[WIDE_LOAD]] to <16 x i32>
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE:%.*]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[VEC_PHI]], <16 x i32> [[TMP1]])
; CHECK-NEON-NEXT: [[PARTIAL_REDUCE1]] = call <4 x i32> @llvm.vector.partial.reduce.add.v4i32.v16i32(<4 x i32> [[PARTIAL_REDUCE]], <16 x i32> [[TMP1]])
; CHECK-NEON-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
; CHECK-NEON-NEXT: [[TMP2:%.*]] = icmp eq i64 [[INDEX_NEXT]], 992
; CHECK-NEON-NEXT: br i1 [[TMP2]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; CHECK-NEON: [[MIDDLE_BLOCK]]:
; CHECK-NEON-NEXT: [[TMP3:%.*]] = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[PARTIAL_REDUCE1]])
; CHECK-NEON-NEXT: store i32 [[TMP3]], ptr [[DST]], align 4
; CHECK-NEON-NEXT: br label %[[SCALAR_PH:.*]]
; CHECK-NEON: [[SCALAR_PH]]:
; CHECK-NEON-NEXT: br label %[[LOOP:.*]]
; CHECK-NEON: [[EXIT:.*]]:
; CHECK-NEON-NEXT: ret void
; CHECK-NEON: [[LOOP]]:
; CHECK-NEON-NEXT: [[IV:%.*]] = phi i64 [ 992, %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEON-NEXT: [[RED:%.*]] = phi i32 [ [[TMP3]], %[[SCALAR_PH]] ], [ [[ADD_1:%.*]], %[[LOOP]] ]
; CHECK-NEON-NEXT: [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]]
; CHECK-NEON-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC]], align 1
; CHECK-NEON-NEXT: [[CONV8:%.*]] = sext i8 [[L]] to i32
; CHECK-NEON-NEXT: [[ADD:%.*]] = add i32 [[RED]], [[CONV8]]
; CHECK-NEON-NEXT: [[CONV8_1:%.*]] = sext i8 [[L]] to i32
; CHECK-NEON-NEXT: [[ADD_1]] = add i32 [[ADD]], [[CONV8_1]]
; CHECK-NEON-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]]
; CHECK-NEON-NEXT: store i32 [[ADD_1]], ptr [[DST]], align 4
; CHECK-NEON-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEON-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEON-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP7:![0-9]+]]
;
; CHECK-LABEL: define void @chained_sext_adds(
; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) #[[ATTR1]] {
; CHECK-NEXT: [[ENTRY:.*]]:
; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP1:%.*]] = shl nuw i64 [[TMP0]], 2
; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1000, [[TMP1]]
; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
; CHECK: [[VECTOR_PH]]:
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = mul nuw i64 [[TMP2]], 4
; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1000, [[TMP3]]
; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 1000, [[N_MOD_VF]]
; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
; CHECK: [[VECTOR_BODY]]:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <vscale x 4 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP7:%.*]], %[[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP4:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[INDEX]]
; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i8>, ptr [[TMP4]], align 1
; CHECK-NEXT: [[TMP5:%.*]] = sext <vscale x 4 x i8> [[WIDE_LOAD]] to <vscale x 4 x i32>
; CHECK-NEXT: [[TMP6:%.*]] = add <vscale x 4 x i32> [[VEC_PHI]], [[TMP5]]
; CHECK-NEXT: [[TMP7]] = add <vscale x 4 x i32> [[TMP6]], [[TMP5]]
; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP3]]
; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
; CHECK-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: [[MIDDLE_BLOCK]]:
; CHECK-NEXT: [[TMP9:%.*]] = call i32 @llvm.vector.reduce.add.nxv4i32(<vscale x 4 x i32> [[TMP7]])
; CHECK-NEXT: store i32 [[TMP9]], ptr [[DST]], align 4
; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 1000, [[N_VEC]]
; CHECK-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
; CHECK: [[SCALAR_PH]]:
; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: [[BC_MERGE_RDX:%.*]] = phi i32 [ [[TMP9]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
; CHECK-NEXT: br label %[[LOOP:.*]]
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
; CHECK: [[LOOP]]:
; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[BC_MERGE_RDX]], %[[SCALAR_PH]] ], [ [[ADD_1:%.*]], %[[LOOP]] ]
; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[IV]]
; CHECK-NEXT: [[L:%.*]] = load i8, ptr [[GEP_SRC]], align 1
; CHECK-NEXT: [[CONV8:%.*]] = sext i8 [[L]] to i32
; CHECK-NEXT: [[ADD:%.*]] = add i32 [[RED]], [[CONV8]]
; CHECK-NEXT: [[CONV8_1:%.*]] = sext i8 [[L]] to i32
; CHECK-NEXT: [[ADD_1]] = add i32 [[ADD]], [[CONV8_1]]
; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr i8, ptr [[DST]], i64 [[IV]]
; CHECK-NEXT: store i32 [[ADD_1]], ptr [[DST]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1000
; CHECK-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP6:![0-9]+]]
;
entry:
  br label %loop

exit: ; preds = %loop
  ret void

loop: ; preds = %loop, %entry
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %red = phi i32 [ 0, %entry ], [ %add.1, %loop ]
  %gep.src = getelementptr i8, ptr %src, i64 %iv
  %l = load i8, ptr %gep.src, align 1
  %conv8 = sext i8 %l to i32
  %add = add i32 %red, %conv8
  %conv8.1 = sext i8 %l to i32
  %add.1 = add i32 %add, %conv8.1
  %gep.dst = getelementptr i8, ptr %dst, i64 %iv
  store i32 %add.1, ptr %dst, align 4
  %iv.next = add i64 %iv, 1
  %exitcond = icmp eq i64 %iv.next, 1000
  br i1 %exitcond, label %exit, label %loop
}

attributes #0 = { "target-cpu"="grace" }