From 2e83e54231f32e629a0011fff77cc86316da142c Mon Sep 17 00:00:00 2001
From: Mel Chen
Date: Thu, 10 Jul 2025 02:06:53 -0700
Subject: [PATCH] [VPlan] Use VPTypeAnalysis to get the step type of widen pointer induction

---
 .../Transforms/Vectorize/VPlanTransforms.cpp  |  4 +-
 .../ivopt-widen-ptr-induction.ll              | 75 +++++++++++++++++++
 2 files changed, 77 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/Transforms/LoopVectorize/ivopt-widen-ptr-induction.ll

diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 02cea8620d271..53eee046b18da 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -854,8 +854,8 @@ optimizeLatchExitInductionUser(VPlan &Plan, VPTypeAnalysis &TypeInfo,
   if (ScalarTy->isIntegerTy())
     return B.createNaryOp(Instruction::Sub, {EndValue, Step}, {}, "ind.escape");
   if (ScalarTy->isPointerTy()) {
-    auto *Zero = Plan.getOrAddLiveIn(
-        ConstantInt::get(Step->getLiveInIRValue()->getType(), 0));
+    Type *StepTy = TypeInfo.inferScalarType(Step);
+    auto *Zero = Plan.getOrAddLiveIn(ConstantInt::get(StepTy, 0));
     return B.createPtrAdd(EndValue,
                           B.createNaryOp(Instruction::Sub, {Zero, Step}), {},
                           "ind.escape");
diff --git a/llvm/test/Transforms/LoopVectorize/ivopt-widen-ptr-induction.ll b/llvm/test/Transforms/LoopVectorize/ivopt-widen-ptr-induction.ll
new file mode 100644
index 0000000000000..e24c112e9e65c
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/ivopt-widen-ptr-induction.ll
@@ -0,0 +1,75 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=4 -lv-strided-pointer-ivs -S < %s | FileCheck %s
+
+define i64 @widen_ptr_indvar(ptr noalias %a, ptr noalias %b, i64 %stride, i64 %n) {
+; CHECK-LABEL: define i64 @widen_ptr_indvar(
+; CHECK-SAME: ptr noalias [[A:%.*]], ptr noalias [[B:%.*]], i64 [[STRIDE:%.*]], i64 [[N:%.*]]) {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[TMP9:%.*]] = add i64 [[N]], 1
+; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[STRIDE]], 3
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP9]], 4
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK:       [[VECTOR_PH]]:
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP9]], 4
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP9]], [[N_MOD_VF]]
+; CHECK-NEXT:    [[TMP2:%.*]] = mul i64 [[N_VEC]], [[TMP1]]
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr null, i64 [[TMP2]]
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr i64, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i64, ptr [[TMP4]], i32 0
+; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP5]], align 8
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT:    [[TMP6:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP6]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK:       [[MIDDLE_BLOCK]]:
+; CHECK-NEXT:    [[TMP7:%.*]] = extractelement <4 x i64> [[WIDE_LOAD]], i32 3
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP9]], [[N_VEC]]
+; CHECK-NEXT:    [[TMP8:%.*]] = sub i64 0, [[TMP1]]
+; CHECK-NEXT:    [[IND_ESCAPE:%.*]] = getelementptr i8, ptr [[TMP3]], i64 [[TMP8]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; CHECK:       [[SCALAR_PH]]:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT:    [[BC_RESUME_VAL1:%.*]] = phi ptr [ [[TMP3]], %[[MIDDLE_BLOCK]] ], [ null, %[[ENTRY]] ]
+; CHECK-NEXT:    br label %[[FOR_BODY:.*]]
+; CHECK:       [[FOR_BODY]]:
+; CHECK-NEXT:    [[INDVAR:%.*]] = phi i64 [ [[INDVAR_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ]
+; CHECK-NEXT:    [[PTR_IV:%.*]] = phi ptr [ [[PTR_IV_NEXT:%.*]], %[[FOR_BODY]] ], [ [[BC_RESUME_VAL1]], %[[SCALAR_PH]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr i64, ptr [[A]], i64 [[INDVAR]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[ARRAYIDX]], align 8
+; CHECK-NEXT:    [[PTR_IV_NEXT]] = getelementptr i64, ptr [[PTR_IV]], i64 [[STRIDE]]
+; CHECK-NEXT:    [[INDVAR_NEXT]] = add i64 [[INDVAR]], 1
+; CHECK-NEXT:    [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVAR]], [[N]]
+; CHECK-NEXT:    br i1 [[EXITCOND_NOT]], label %[[EXIT]], label %[[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK:       [[EXIT]]:
+; CHECK-NEXT:    [[PTR_IV_LCSSA:%.*]] = phi ptr [ [[PTR_IV]], %[[FOR_BODY]] ], [ [[IND_ESCAPE]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    [[DOTLCSSA:%.*]] = phi i64 [ [[TMP0]], %[[FOR_BODY]] ], [ [[TMP7]], %[[MIDDLE_BLOCK]] ]
+; CHECK-NEXT:    [[CAST_PTR:%.*]] = ptrtoint ptr [[PTR_IV_LCSSA]] to i64
+; CHECK-NEXT:    [[RESULT:%.*]] = add i64 [[CAST_PTR]], [[DOTLCSSA]]
+; CHECK-NEXT:    ret i64 [[RESULT]]
+;
+entry:
+  br label %for.body
+
+for.body:
+  %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %entry ]
+  %ptr.iv = phi ptr [ %ptr.iv.next, %for.body ], [ null, %entry ]
+  %arrayidx = getelementptr i64, ptr %a, i64 %indvar
+  %0 = load i64, ptr %arrayidx, align 8
+  %ptr.iv.next = getelementptr i64, ptr %ptr.iv, i64 %stride
+  %indvar.next = add i64 %indvar, 1
+  %exitcond.not = icmp eq i64 %indvar, %n
+  br i1 %exitcond.not, label %exit, label %for.body
+
+exit:
+  %cast.ptr = ptrtoint ptr %ptr.iv to i64
+  %result = add i64 %cast.ptr, %0
+  ret i64 %result
+}
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
+;.