[LV] Use SCEV to check if minimum iteration check is known. (#111310)
Use SCEV to check if the minimum iteration check (TC < Step) is known to
be false.

This is a first step towards addressing
#111098. To catch the exact
case from the issue, we need to do extra work to make sure the wrap
flags on the shl are preserved and used by SCEV.

Note that skeleton creation will gradually be moved to VPlan, and this
simplification should eventually be done as a VPlan transform. The
current plan is to migrate skeleton creation to VPlan beginning with the
parts closest to those VPlan already creates: induction resume value
creation (started with #110577), then memory and SCEV checks, and
finally the minimum iteration checks.

PR: #111310
fhahn authored Oct 18, 2024 · 1 parent e261519 · commit c7496ce
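
The change reduces to one pattern: before emitting the guard, ask
ScalarEvolution whether the predicate (or its inverse) is already provable
once dominating loop guards are applied to the trip count. Below is a
minimal sketch of that pattern as a hypothetical free-standing helper
(foldOrEmitMinIterCheck and its parameter list are illustrative only; in the
commit the logic is inlined in InnerLoopVectorizer::emitIterationCountCheck
and pulls ScalarEvolution and the loop from PSE and OrigLoop):

#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Hypothetical standalone helper mirroring the patch's logic.
static Value *foldOrEmitMinIterCheck(IRBuilder<> &Builder, ScalarEvolution &SE,
                                     const Loop *L, CmpInst::Predicate P,
                                     Value *Count, Value *Step) {
  // Dominating guards (e.g. `if (n >= 1024)`) can tighten the trip count's
  // known range before the comparison is evaluated symbolically.
  const SCEV *TC = SE.applyLoopGuards(SE.getSCEV(Count), L);
  const SCEV *StepSCEV = SE.getSCEV(Step);
  if (SE.isKnownPredicate(P, TC, StepSCEV))
    return Builder.getTrue();  // TC < Step always holds: bypass vector loop.
  if (SE.isKnownPredicate(CmpInst::getInversePredicate(P), TC, StepSCEV))
    return Builder.getFalse(); // TC >= Step always holds: no check needed.
  // Neither direction provable: materialize the runtime check.
  return Builder.CreateICmp(P, Count, Step, "min.iters.check");
}

The committed code differs slightly from this sketch: it only folds in the
TailFoldingStyle::None path, and in the known-false case it simply leaves
CheckMinIters at its preset false value instead of calling getFalse().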
Show file tree
Hide file tree
Showing 8 changed files with 49 additions and 58 deletions.
38 changes: 31 additions & 7 deletions llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -2447,12 +2447,26 @@ void InnerLoopVectorizer::emitIterationCountCheck(BasicBlock *Bypass) {
   };
 
   TailFoldingStyle Style = Cost->getTailFoldingStyle();
-  if (Style == TailFoldingStyle::None)
-    CheckMinIters =
-        Builder.CreateICmp(P, Count, CreateStep(), "min.iters.check");
-  else if (VF.isScalable() &&
-           !isIndvarOverflowCheckKnownFalse(Cost, VF, UF) &&
-           Style != TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck) {
+  if (Style == TailFoldingStyle::None) {
+    Value *Step = CreateStep();
+    ScalarEvolution &SE = *PSE.getSE();
+    // TODO: Emit unconditional branch to vector preheader instead of
+    // conditional branch with known condition.
+    const SCEV *TripCountSCEV = SE.applyLoopGuards(SE.getSCEV(Count), OrigLoop);
+    // Check if the trip count is < the step.
+    if (SE.isKnownPredicate(P, TripCountSCEV, SE.getSCEV(Step))) {
+      // TODO: Ensure step is at most the trip count when determining max VF and
+      // UF, w/o tail folding.
+      CheckMinIters = Builder.getTrue();
+    } else if (!SE.isKnownPredicate(CmpInst::getInversePredicate(P),
+                                    TripCountSCEV, SE.getSCEV(Step))) {
+      // Generate the minimum iteration check only if we cannot prove the
+      // check is known to be true, or known to be false.
+      CheckMinIters = Builder.CreateICmp(P, Count, Step, "min.iters.check");
+    } // else step known to be < trip count, use CheckMinIters preset to false.
+  } else if (VF.isScalable() &&
+             !isIndvarOverflowCheckKnownFalse(Cost, VF, UF) &&
+             Style != TailFoldingStyle::DataAndControlFlowWithoutRuntimeCheck) {
     // vscale is not necessarily a power-of-2, which means we cannot guarantee
     // an overflow to zero when updating induction variables and so an
     // additional overflow check is required before entering the vector loop.
@@ -2462,8 +2476,18 @@ void InnerLoopVectorizer::emitIterationCountCheck(BasicBlock *Bypass) {
         ConstantInt::get(CountTy, cast<IntegerType>(CountTy)->getMask());
     Value *LHS = Builder.CreateSub(MaxUIntTripCount, Count);
 
+    Value *Step = CreateStep();
+#ifndef NDEBUG
+    ScalarEvolution &SE = *PSE.getSE();
+    const SCEV *TC2OverflowSCEV = SE.applyLoopGuards(SE.getSCEV(LHS), OrigLoop);
+    assert(
+        !isIndvarOverflowCheckKnownFalse(Cost, VF * UF) &&
+        !SE.isKnownPredicate(CmpInst::getInversePredicate(ICmpInst::ICMP_ULT),
+                             TC2OverflowSCEV, SE.getSCEV(Step)) &&
+        "unexpectedly proved overflow check to be known");
+#endif
    // Don't execute the vector loop if (UMax - n) < (VF * UF).
-    CheckMinIters = Builder.CreateICmp(ICmpInst::ICMP_ULT, LHS, CreateStep());
+    CheckMinIters = Builder.CreateICmp(ICmpInst::ICMP_ULT, LHS, Step);
   }
 
   // Create new preheader for vector loop.
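For intuition on why the test updates below fold to `br i1 false`: their trip
counts are compile-time constants (1024, 1025, 100), while the scalable step
is `vscale * VF`, and SCEV can bound `vscale` from the function's
`vscale_range` attribute, so the step can never reach the trip count. A
reduced, hypothetical example of an input whose minimum-iteration check
should now fold away (assuming a `vscale_range(1,16)` attribute, as is
typical for these SVE tests):

; Hypothetical input: vscale_range(1,16) caps vscale at 16, so the step
; (vscale * 4) is at most 64 and SCEV proves (1024 u< step) is false.
define void @example(ptr %A) vscale_range(1,16) {
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep = getelementptr inbounds i32, ptr %A, i64 %iv
  store i32 0, ptr %gep, align 4
  %iv.next = add nuw nsw i64 %iv, 1
  %ec = icmp eq i64 %iv.next, 1024
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}

Run through something like `opt -passes=loop-vectorize -mtriple=aarch64
-mattr=+sve`, such input should now produce the unconditional `br i1 false`
seen in the CHECK lines below.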
@@ -11,8 +11,7 @@ define void @f1(ptr %A) #0 {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; CHECK: vector.ph:
 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
44 changes: 9 additions & 35 deletions llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
@@ -11,10 +11,7 @@ target triple = "aarch64-unknown-linux-gnu"
 define void @test_widen(ptr noalias %a, ptr readnone %b) #4 {
 ; TFNONE-LABEL: @test_widen(
 ; TFNONE-NEXT: entry:
-; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; TFNONE: vector.ph:
 ; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
 ; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
@@ -146,10 +143,7 @@ for.cond.cleanup:
 define void @test_if_then(ptr noalias %a, ptr readnone %b) #4 {
 ; TFNONE-LABEL: @test_if_then(
 ; TFNONE-NEXT: entry:
-; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; TFNONE: vector.ph:
 ; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
 ; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
@@ -310,10 +304,7 @@ for.cond.cleanup:
 define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
 ; TFNONE-LABEL: @test_widen_if_then_else(
 ; TFNONE-NEXT: entry:
-; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; TFNONE: vector.ph:
 ; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
 ; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
@@ -490,10 +481,7 @@ for.cond.cleanup:
 define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 {
 ; TFNONE-LABEL: @test_widen_nomask(
 ; TFNONE-NEXT: entry:
-; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; TFNONE: vector.ph:
 ; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
 ; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
@@ -548,11 +536,6 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 {
 ;
 ; TFFALLBACK-LABEL: @test_widen_nomask(
 ; TFFALLBACK-NEXT: entry:
-; TFFALLBACK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TFFALLBACK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; TFFALLBACK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; TFFALLBACK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; TFFALLBACK: vector.ph:
 ; TFFALLBACK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
 ; TFFALLBACK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
 ; TFFALLBACK-NEXT: [[N_MOD_VF:%.*]] = urem i64 1025, [[TMP3]]
@@ -561,20 +544,17 @@ define void @test_widen_nomask(ptr noalias %a, ptr readnone %b) #4 {
 ; TFFALLBACK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 2
 ; TFFALLBACK-NEXT: br label [[VECTOR_BODY:%.*]]
 ; TFFALLBACK: vector.body:
-; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; TFFALLBACK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; TFFALLBACK-NEXT: [[TMP6:%.*]] = getelementptr i64, ptr [[B:%.*]], i64 [[INDEX]]
 ; TFFALLBACK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 2 x i64>, ptr [[TMP6]], align 8
 ; TFFALLBACK-NEXT: [[TMP7:%.*]] = call <vscale x 2 x i64> @foo_vector_nomask(<vscale x 2 x i64> [[WIDE_LOAD]])
 ; TFFALLBACK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i64, ptr [[A:%.*]], i64 [[INDEX]]
 ; TFFALLBACK-NEXT: store <vscale x 2 x i64> [[TMP7]], ptr [[TMP8]], align 8
 ; TFFALLBACK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
 ; TFFALLBACK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; TFFALLBACK-NEXT: br i1 [[TMP9]], label [[SCALAR_PH]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
-; TFFALLBACK: scalar.ph:
-; TFFALLBACK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[N_VEC]], [[VECTOR_BODY]] ]
-; TFFALLBACK-NEXT: br label [[FOR_BODY:%.*]]
+; TFFALLBACK-NEXT: br i1 [[TMP9]], label [[FOR_BODY:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
 ; TFFALLBACK: for.body:
-; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; TFFALLBACK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ [[N_VEC]], [[VECTOR_BODY]] ]
 ; TFFALLBACK-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDVARS_IV]]
 ; TFFALLBACK-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 8
 ; TFFALLBACK-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR5:[0-9]+]]
@@ -626,10 +606,7 @@ define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 {
 define void @test_widen_optmask(ptr noalias %a, ptr readnone %b) #4 {
 ; TFNONE-LABEL: @test_widen_optmask(
 ; TFNONE-NEXT: entry:
-; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; TFNONE: vector.ph:
 ; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
 ; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
@@ -791,10 +768,7 @@ define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, double %m) #4 {
 define double @test_widen_fmuladd_and_call(ptr noalias %a, ptr readnone %b, double %m) #4 {
 ; TFNONE-LABEL: @test_widen_fmuladd_and_call(
 ; TFNONE-NEXT: entry:
-; TFNONE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; TFNONE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 2
-; TFNONE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; TFNONE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; TFNONE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; TFNONE: vector.ph:
 ; TFNONE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
 ; TFNONE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 2
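The larger TFFALLBACK rewrite in @test_widen_nomask above is a knock-on
effect of the folded check rather than a separate feature: once the entry
branch is the constant false, the scalar preheader is statically dead, so
vector.ph merges into entry and the vector latch exits directly to the scalar
loop, whose induction phi takes %n.vec straight from the vector body. A
hand-drawn schematic of the resulting shape (hypothetical IR, not the test's
literal output):

; Schematic post-fold CFG: no min.iters.check, no scalar.ph.
define void @schematic(i64 %n.vec, i64 %step) {
entry:                                             ; vector.ph merged in here
  br label %vector.body

vector.body:
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  %index.next = add nuw i64 %index, %step
  %done = icmp eq i64 %index.next, %n.vec
  br i1 %done, label %for.body, label %vector.body ; exits to the scalar loop

for.body:                                          ; resumes at %n.vec
  %iv = phi i64 [ %n.vec, %vector.body ], [ %iv.next, %for.body ]
  %iv.next = add i64 %iv, 1
  %ec = icmp eq i64 %iv.next, 1025
  br i1 %ec, label %exit, label %for.body

exit:
  ret void
}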
@@ -10,8 +10,7 @@ define void @test_invar_gep(ptr %dst) #0 {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 100, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; CHECK: vector.ph:
 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
@@ -757,8 +757,7 @@ define void @simple_memset_trip1024(i32 %val, ptr %ptr, i64 %n) #0 {
 ; CHECK-NEXT: entry:
 ; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1024, [[TMP1]]
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; CHECK: vector.ph:
 ; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
@@ -7,10 +7,7 @@ target triple = "aarch64-unknown-linux-gnu"
 define void @test_widen(ptr noalias %a, ptr readnone %b) #1 {
 ; WIDE-LABEL: @test_widen(
 ; WIDE-NEXT: entry:
-; WIDE-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
-; WIDE-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
-; WIDE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 1025, [[TMP1]]
-; WIDE-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; WIDE-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
 ; WIDE: vector.ph:
 ; WIDE-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
 ; WIDE-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
4 changes: 2 additions & 2 deletions llvm/test/Transforms/LoopVectorize/if-reduction.ll
@@ -1659,6 +1659,7 @@ for.end: ; preds = %for.body, %entry
   ret i64 %1
 }
 
+; FIXME: %indvars.iv.next is poison on first iteration due to sub nuw 0, 1.
 define i32 @fcmp_0_sub_select1(ptr noalias %x, i32 %N) nounwind readonly {
 ; CHECK-LABEL: define i32 @fcmp_0_sub_select1(
 ; CHECK-SAME: ptr noalias [[X:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
@@ -1668,8 +1669,7 @@ define i32 @fcmp_0_sub_select1(ptr noalias %x, i32 %N) nounwind readonly {
 ; CHECK: [[FOR_HEADER]]:
 ; CHECK-NEXT: [[ZEXT:%.*]] = zext i32 [[N]] to i64
 ; CHECK-NEXT: [[TMP0:%.*]] = sub i64 0, [[ZEXT]]
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP0]], 4
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
 ; CHECK: [[VECTOR_PH]]:
 ; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP0]], 4
 ; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP0]], [[N_MOD_VF]]
@@ -415,6 +415,7 @@ exit:
 
 ; Test case to make sure that uses of versioned strides of type i1 are properly
 ; extended. From https://github.com/llvm/llvm-project/issues/91369.
+; TODO: Better check (udiv i64 15, %g.64) after checking if %g == 1.
 define void @zext_of_i1_stride(i1 %g, ptr %dst) mustprogress {
 ; CHECK-LABEL: define void @zext_of_i1_stride(
 ; CHECK-SAME: i1 [[G:%.*]], ptr [[DST:%.*]]) #[[ATTR0:[0-9]+]] {
@@ -423,8 +424,7 @@ define void @zext_of_i1_stride(i1 %g, ptr %dst) mustprogress {
 ; CHECK-NEXT: [[G_64:%.*]] = zext i1 [[G]] to i64
 ; CHECK-NEXT: [[TMP0:%.*]] = udiv i64 15, [[G_64]]
 ; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 4
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; CHECK-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
 ; CHECK: vector.scevcheck:
 ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i1 [[G]], true
 ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
@@ -489,8 +489,7 @@ define void @sext_of_i1_stride(i1 %g, ptr %dst) mustprogress {
 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[UMAX]], -1
 ; CHECK-NEXT: [[TMP1:%.*]] = udiv i64 [[TMP0]], [[G_64]]
 ; CHECK-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 4
-; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; CHECK-NEXT: br i1 true, label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
 ; CHECK: vector.scevcheck:
 ; CHECK-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i1 [[G]], true
 ; CHECK-NEXT: br i1 [[IDENT_CHECK]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
