[LoopUnroll] Adjust CostKind query

When TTI was updated to take an explicit cost kind, TCK_CodeSize was
used, although the default implicit cost would have been the hand-wavy
combined cost of size and latency (TCK_SizeAndLatency). So, revert to
that behaviour. This is not expected to have (much) impact on targets,
since most (all?) of them return the same value for SizeAndLatency and
CodeSize.
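
For reference, the cost kinds involved are (a sketch of the
TargetTransformInfo::TargetCostKind enum around the time of this change;
see TargetTransformInfo.h for the authoritative definition):

  enum TargetCostKind {
    TCK_RecipThroughput, // Reciprocal throughput.
    TCK_Latency,         // Instruction latency.
    TCK_CodeSize,        // Code size.
    TCK_SizeAndLatency   // Combined size and latency.
  };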

When optimising for size, the unroll cost analysis has been changed to
query CodeSize costs instead of SizeAndLatency.
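
Concretely, the profitability analysis now picks the cost kind from the
attributes of the function containing the loop. A sketch of the logic in
the LoopUnrollPass.cpp hunks below, with F standing in for
RootI.getFunction() or L->getHeader()->getParent():

  TargetTransformInfo::TargetCostKind CostKind =
      F->hasMinSize() ? TargetTransformInfo::TCK_CodeSize
                      : TargetTransformInfo::TCK_SizeAndLatency;
  UnrolledCost += TTI.getUserCost(I, CostKind);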

This patch also adds a testing option, -unroll-optsize-threshold, so
that the unroller's OptSize thresholds can be specified on the command
line.
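
For example, the new test below exercises it with:

  opt -loop-unroll -unroll-allow-partial -unroll-optsize-threshold=18 \
    -mtriple=thumbv8 -S %s -o - | FileCheck %s --check-prefix=CHECK-V8

which sets both UP.OptSizeThreshold and UP.PartialOptSizeThreshold to 18.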

Differential Revision: https://reviews.llvm.org/D85723
diff --git a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
index 6160bc8..c6524b6 100644
--- a/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
+++ b/llvm/lib/Target/ARM/ARMTargetTransformInfo.cpp
@@ -1757,7 +1757,8 @@
 
       SmallVector<const Value*, 4> Operands(I.value_op_begin(),
                                             I.value_op_end());
-      Cost += getUserCost(&I, Operands, TargetTransformInfo::TCK_CodeSize);
+      Cost +=
+        getUserCost(&I, Operands, TargetTransformInfo::TCK_SizeAndLatency);
     }
   }
 
diff --git a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
index 2b61039..29ac496f 100644
--- a/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopUnrollPass.cpp
@@ -83,6 +83,12 @@
     UnrollThreshold("unroll-threshold", cl::Hidden,
                     cl::desc("The cost threshold for loop unrolling"));
 
+static cl::opt<unsigned>
+    UnrollOptSizeThreshold(
+      "unroll-optsize-threshold", cl::init(0), cl::Hidden,
+      cl::desc("The cost threshold for loop unrolling when optimizing for "
+               "size"));
+
 static cl::opt<unsigned> UnrollPartialThreshold(
     "unroll-partial-threshold", cl::Hidden,
     cl::desc("The cost threshold for partial loop unrolling"));
@@ -188,9 +194,9 @@
   UP.Threshold =
       OptLevel > 2 ? UnrollThresholdAggressive : UnrollThresholdDefault;
   UP.MaxPercentThresholdBoost = 400;
-  UP.OptSizeThreshold = 0;
+  UP.OptSizeThreshold = UnrollOptSizeThreshold;
   UP.PartialThreshold = 150;
-  UP.PartialOptSizeThreshold = 0;
+  UP.PartialOptSizeThreshold = UnrollOptSizeThreshold;
   UP.Count = 0;
   UP.DefaultUnrollRuntimeCount = 8;
   UP.MaxCount = std::numeric_limits<unsigned>::max();
@@ -381,6 +387,10 @@
     assert(CostWorklist.empty() && "Must start with an empty cost list");
     assert(PHIUsedList.empty() && "Must start with an empty phi used list");
     CostWorklist.push_back(&RootI);
+    TargetTransformInfo::TargetCostKind CostKind =
+      RootI.getFunction()->hasMinSize() ?
+      TargetTransformInfo::TCK_CodeSize :
+      TargetTransformInfo::TCK_SizeAndLatency;
     for (;; --Iteration) {
       do {
         Instruction *I = CostWorklist.pop_back_val();
@@ -421,7 +431,7 @@
 
         // First accumulate the cost of this instruction.
         if (!Cost.IsFree) {
-          UnrolledCost += TTI.getUserCost(I, TargetTransformInfo::TCK_CodeSize);
+          UnrolledCost += TTI.getUserCost(I, CostKind);
           LLVM_DEBUG(dbgs() << "Adding cost of instruction (iteration "
                             << Iteration << "): ");
           LLVM_DEBUG(I->dump());
@@ -461,6 +471,9 @@
 
   LLVM_DEBUG(dbgs() << "Starting LoopUnroll profitability analysis...\n");
 
+  TargetTransformInfo::TargetCostKind CostKind =
+    L->getHeader()->getParent()->hasMinSize() ?
+    TargetTransformInfo::TCK_CodeSize : TargetTransformInfo::TCK_SizeAndLatency;
   // Simulate execution of each iteration of the loop counting instructions,
   // which would be simplified.
   // Since the same load will take different values on different iterations,
@@ -514,7 +527,7 @@
 
         // Track this instruction's expected baseline cost when executing the
         // rolled loop form.
-        RolledDynamicCost += TTI.getUserCost(&I, TargetTransformInfo::TCK_CodeSize);
+        RolledDynamicCost += TTI.getUserCost(&I, CostKind);
 
         // Visit the instruction to analyze its loop cost after unrolling,
         // and if the visitor returns true, mark the instruction as free after
diff --git a/llvm/test/Transforms/LoopUnroll/ARM/instr-size-costs.ll b/llvm/test/Transforms/LoopUnroll/ARM/instr-size-costs.ll
new file mode 100644
index 0000000..0c5d7b6
--- /dev/null
+++ b/llvm/test/Transforms/LoopUnroll/ARM/instr-size-costs.ll
@@ -0,0 +1,397 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -loop-unroll -unroll-allow-partial -unroll-optsize-threshold=18 -mtriple=thumbv8 -S %s -o - | FileCheck %s --check-prefix=CHECK-V8
+
+define void @test_i32_add_optsize(i32* %a, i32* %b, i32* %c) #0 {
+; CHECK-V8-LABEL: @test_i32_add_optsize(
+; CHECK-V8-NEXT:  entry:
+; CHECK-V8-NEXT:    br label [[LOOP:%.*]]
+; CHECK-V8:       loop:
+; CHECK-V8-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT_1:%.*]], [[LOOP]] ]
+; CHECK-V8-NEXT:    [[ADDR_A:%.*]] = getelementptr i32, i32* [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[ADDR_B:%.*]] = getelementptr i32, i32* [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[DATA_A:%.*]] = load i32, i32* [[ADDR_A]], align 4
+; CHECK-V8-NEXT:    [[DATA_B:%.*]] = load i32, i32* [[ADDR_B]], align 4
+; CHECK-V8-NEXT:    [[RES:%.*]] = add i32 [[DATA_A]], [[DATA_B]]
+; CHECK-V8-NEXT:    [[ADDR_C:%.*]] = getelementptr i32, i32* [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    store i32 [[RES]], i32* [[ADDR_C]], align 4
+; CHECK-V8-NEXT:    [[COUNT:%.*]] = add nuw nsw i32 [[IV]], 1
+; CHECK-V8-NEXT:    [[ADDR_A_1:%.*]] = getelementptr i32, i32* [[A]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    [[ADDR_B_1:%.*]] = getelementptr i32, i32* [[B]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    [[DATA_A_1:%.*]] = load i32, i32* [[ADDR_A_1]], align 4
+; CHECK-V8-NEXT:    [[DATA_B_1:%.*]] = load i32, i32* [[ADDR_B_1]], align 4
+; CHECK-V8-NEXT:    [[RES_1:%.*]] = add i32 [[DATA_A_1]], [[DATA_B_1]]
+; CHECK-V8-NEXT:    [[ADDR_C_1:%.*]] = getelementptr i32, i32* [[C]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    store i32 [[RES_1]], i32* [[ADDR_C_1]], align 4
+; CHECK-V8-NEXT:    [[COUNT_1]] = add nuw nsw i32 [[COUNT]], 1
+; CHECK-V8-NEXT:    [[END_1:%.*]] = icmp ne i32 [[COUNT_1]], 100
+; CHECK-V8-NEXT:    br i1 [[END_1]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK-V8:       exit:
+; CHECK-V8-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %count, %loop ]
+  %addr.a = getelementptr i32, i32* %a, i32 %iv
+  %addr.b = getelementptr i32, i32* %b, i32 %iv
+  %data.a = load i32, i32* %addr.a
+  %data.b = load i32, i32* %addr.b
+  %res = add i32 %data.a, %data.b
+  %addr.c = getelementptr i32, i32* %c, i32 %iv
+  store i32 %res, i32* %addr.c
+  %count = add nuw i32 %iv, 1
+  %end = icmp ne i32 %count, 100
+  br i1 %end, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define void @test_i32_add_minsize(i32* %a, i32* %b, i32* %c) #1 {
+; CHECK-V8-LABEL: @test_i32_add_minsize(
+; CHECK-V8-NEXT:  entry:
+; CHECK-V8-NEXT:    br label [[LOOP:%.*]]
+; CHECK-V8:       loop:
+; CHECK-V8-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT_1:%.*]], [[LOOP]] ]
+; CHECK-V8-NEXT:    [[ADDR_A:%.*]] = getelementptr i32, i32* [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[ADDR_B:%.*]] = getelementptr i32, i32* [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[DATA_A:%.*]] = load i32, i32* [[ADDR_A]], align 4
+; CHECK-V8-NEXT:    [[DATA_B:%.*]] = load i32, i32* [[ADDR_B]], align 4
+; CHECK-V8-NEXT:    [[RES:%.*]] = add i32 [[DATA_A]], [[DATA_B]]
+; CHECK-V8-NEXT:    [[ADDR_C:%.*]] = getelementptr i32, i32* [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    store i32 [[RES]], i32* [[ADDR_C]], align 4
+; CHECK-V8-NEXT:    [[COUNT:%.*]] = add nuw nsw i32 [[IV]], 1
+; CHECK-V8-NEXT:    [[ADDR_A_1:%.*]] = getelementptr i32, i32* [[A]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    [[ADDR_B_1:%.*]] = getelementptr i32, i32* [[B]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    [[DATA_A_1:%.*]] = load i32, i32* [[ADDR_A_1]], align 4
+; CHECK-V8-NEXT:    [[DATA_B_1:%.*]] = load i32, i32* [[ADDR_B_1]], align 4
+; CHECK-V8-NEXT:    [[RES_1:%.*]] = add i32 [[DATA_A_1]], [[DATA_B_1]]
+; CHECK-V8-NEXT:    [[ADDR_C_1:%.*]] = getelementptr i32, i32* [[C]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    store i32 [[RES_1]], i32* [[ADDR_C_1]], align 4
+; CHECK-V8-NEXT:    [[COUNT_1]] = add nuw nsw i32 [[COUNT]], 1
+; CHECK-V8-NEXT:    [[END_1:%.*]] = icmp ne i32 [[COUNT_1]], 100
+; CHECK-V8-NEXT:    br i1 [[END_1]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK-V8:       exit:
+; CHECK-V8-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %count, %loop ]
+  %addr.a = getelementptr i32, i32* %a, i32 %iv
+  %addr.b = getelementptr i32, i32* %b, i32 %iv
+  %data.a = load i32, i32* %addr.a
+  %data.b = load i32, i32* %addr.b
+  %res = add i32 %data.a, %data.b
+  %addr.c = getelementptr i32, i32* %c, i32 %iv
+  store i32 %res, i32* %addr.c
+  %count = add nuw i32 %iv, 1
+  %end = icmp ne i32 %count, 100
+  br i1 %end, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define void @test_i64_add_optsize(i64* %a, i64* %b, i64* %c) #0 {
+; CHECK-V8-LABEL: @test_i64_add_optsize(
+; CHECK-V8-NEXT:  entry:
+; CHECK-V8-NEXT:    br label [[LOOP:%.*]]
+; CHECK-V8:       loop:
+; CHECK-V8-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT_1:%.*]], [[LOOP]] ]
+; CHECK-V8-NEXT:    [[ADDR_A:%.*]] = getelementptr i64, i64* [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[ADDR_B:%.*]] = getelementptr i64, i64* [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[DATA_A:%.*]] = load i64, i64* [[ADDR_A]], align 4
+; CHECK-V8-NEXT:    [[DATA_B:%.*]] = load i64, i64* [[ADDR_B]], align 4
+; CHECK-V8-NEXT:    [[RES:%.*]] = add i64 [[DATA_A]], [[DATA_B]]
+; CHECK-V8-NEXT:    [[ADDR_C:%.*]] = getelementptr i64, i64* [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    store i64 [[RES]], i64* [[ADDR_C]], align 4
+; CHECK-V8-NEXT:    [[COUNT:%.*]] = add nuw nsw i32 [[IV]], 1
+; CHECK-V8-NEXT:    [[ADDR_A_1:%.*]] = getelementptr i64, i64* [[A]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    [[ADDR_B_1:%.*]] = getelementptr i64, i64* [[B]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    [[DATA_A_1:%.*]] = load i64, i64* [[ADDR_A_1]], align 4
+; CHECK-V8-NEXT:    [[DATA_B_1:%.*]] = load i64, i64* [[ADDR_B_1]], align 4
+; CHECK-V8-NEXT:    [[RES_1:%.*]] = add i64 [[DATA_A_1]], [[DATA_B_1]]
+; CHECK-V8-NEXT:    [[ADDR_C_1:%.*]] = getelementptr i64, i64* [[C]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    store i64 [[RES_1]], i64* [[ADDR_C_1]], align 4
+; CHECK-V8-NEXT:    [[COUNT_1]] = add nuw nsw i32 [[COUNT]], 1
+; CHECK-V8-NEXT:    [[END_1:%.*]] = icmp ne i32 [[COUNT_1]], 100
+; CHECK-V8-NEXT:    br i1 [[END_1]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK-V8:       exit:
+; CHECK-V8-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %count, %loop ]
+  %addr.a = getelementptr i64, i64* %a, i32 %iv
+  %addr.b = getelementptr i64, i64* %b, i32 %iv
+  %data.a = load i64, i64* %addr.a
+  %data.b = load i64, i64* %addr.b
+  %res = add i64 %data.a, %data.b
+  %addr.c = getelementptr i64, i64* %c, i32 %iv
+  store i64 %res, i64* %addr.c
+  %count = add nuw i32 %iv, 1
+  %end = icmp ne i32 %count, 100
+  br i1 %end, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define void @test_i64_add_minsize(i64* %a, i64* %b, i64* %c) #1 {
+; CHECK-V8-LABEL: @test_i64_add_minsize(
+; CHECK-V8-NEXT:  entry:
+; CHECK-V8-NEXT:    br label [[LOOP:%.*]]
+; CHECK-V8:       loop:
+; CHECK-V8-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT_1:%.*]], [[LOOP]] ]
+; CHECK-V8-NEXT:    [[ADDR_A:%.*]] = getelementptr i64, i64* [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[ADDR_B:%.*]] = getelementptr i64, i64* [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[DATA_A:%.*]] = load i64, i64* [[ADDR_A]], align 4
+; CHECK-V8-NEXT:    [[DATA_B:%.*]] = load i64, i64* [[ADDR_B]], align 4
+; CHECK-V8-NEXT:    [[RES:%.*]] = add i64 [[DATA_A]], [[DATA_B]]
+; CHECK-V8-NEXT:    [[ADDR_C:%.*]] = getelementptr i64, i64* [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    store i64 [[RES]], i64* [[ADDR_C]], align 4
+; CHECK-V8-NEXT:    [[COUNT:%.*]] = add nuw nsw i32 [[IV]], 1
+; CHECK-V8-NEXT:    [[ADDR_A_1:%.*]] = getelementptr i64, i64* [[A]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    [[ADDR_B_1:%.*]] = getelementptr i64, i64* [[B]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    [[DATA_A_1:%.*]] = load i64, i64* [[ADDR_A_1]], align 4
+; CHECK-V8-NEXT:    [[DATA_B_1:%.*]] = load i64, i64* [[ADDR_B_1]], align 4
+; CHECK-V8-NEXT:    [[RES_1:%.*]] = add i64 [[DATA_A_1]], [[DATA_B_1]]
+; CHECK-V8-NEXT:    [[ADDR_C_1:%.*]] = getelementptr i64, i64* [[C]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    store i64 [[RES_1]], i64* [[ADDR_C_1]], align 4
+; CHECK-V8-NEXT:    [[COUNT_1]] = add nuw nsw i32 [[COUNT]], 1
+; CHECK-V8-NEXT:    [[END_1:%.*]] = icmp ne i32 [[COUNT_1]], 100
+; CHECK-V8-NEXT:    br i1 [[END_1]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK-V8:       exit:
+; CHECK-V8-NEXT:    ret void
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %count, %loop ]
+  %addr.a = getelementptr i64, i64* %a, i32 %iv
+  %addr.b = getelementptr i64, i64* %b, i32 %iv
+  %data.a = load i64, i64* %addr.a
+  %data.b = load i64, i64* %addr.b
+  %res = add i64 %data.a, %data.b
+  %addr.c = getelementptr i64, i64* %c, i32 %iv
+  store i64 %res, i64* %addr.c
+  %count = add nuw i32 %iv, 1
+  %end = icmp ne i32 %count, 100
+  br i1 %end, label %loop, label %exit
+
+exit:
+  ret void
+}
+
+define i32 @test_i32_select_optsize(i32* %a, i32* %b, i32* %c) #0 {
+; CHECK-V8-LABEL: @test_i32_select_optsize(
+; CHECK-V8-NEXT:  entry:
+; CHECK-V8-NEXT:    br label [[LOOP:%.*]]
+; CHECK-V8:       loop:
+; CHECK-V8-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT_1:%.*]], [[LOOP]] ]
+; CHECK-V8-NEXT:    [[ACC:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ACC_NEXT_1:%.*]], [[LOOP]] ]
+; CHECK-V8-NEXT:    [[ADDR_A:%.*]] = getelementptr i32, i32* [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[ADDR_B:%.*]] = getelementptr i32, i32* [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[DATA_A:%.*]] = load i32, i32* [[ADDR_A]], align 4
+; CHECK-V8-NEXT:    [[DATA_B:%.*]] = load i32, i32* [[ADDR_B]], align 4
+; CHECK-V8-NEXT:    [[UGT:%.*]] = icmp ugt i32 [[DATA_A]], [[DATA_B]]
+; CHECK-V8-NEXT:    [[UMAX:%.*]] = select i1 [[UGT]], i32 [[DATA_A]], i32 [[DATA_B]]
+; CHECK-V8-NEXT:    [[ACC_NEXT:%.*]] = add i32 [[UMAX]], [[ACC]]
+; CHECK-V8-NEXT:    [[ADDR_C:%.*]] = getelementptr i32, i32* [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    store i32 [[UMAX]], i32* [[ADDR_C]], align 4
+; CHECK-V8-NEXT:    [[COUNT:%.*]] = add nuw nsw i32 [[IV]], 1
+; CHECK-V8-NEXT:    [[ADDR_A_1:%.*]] = getelementptr i32, i32* [[A]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    [[ADDR_B_1:%.*]] = getelementptr i32, i32* [[B]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    [[DATA_A_1:%.*]] = load i32, i32* [[ADDR_A_1]], align 4
+; CHECK-V8-NEXT:    [[DATA_B_1:%.*]] = load i32, i32* [[ADDR_B_1]], align 4
+; CHECK-V8-NEXT:    [[UGT_1:%.*]] = icmp ugt i32 [[DATA_A_1]], [[DATA_B_1]]
+; CHECK-V8-NEXT:    [[UMAX_1:%.*]] = select i1 [[UGT_1]], i32 [[DATA_A_1]], i32 [[DATA_B_1]]
+; CHECK-V8-NEXT:    [[ACC_NEXT_1]] = add i32 [[UMAX_1]], [[ACC_NEXT]]
+; CHECK-V8-NEXT:    [[ADDR_C_1:%.*]] = getelementptr i32, i32* [[C]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    store i32 [[UMAX_1]], i32* [[ADDR_C_1]], align 4
+; CHECK-V8-NEXT:    [[COUNT_1]] = add nuw nsw i32 [[COUNT]], 1
+; CHECK-V8-NEXT:    [[END_1:%.*]] = icmp ne i32 [[COUNT_1]], 100
+; CHECK-V8-NEXT:    br i1 [[END_1]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK-V8:       exit:
+; CHECK-V8-NEXT:    [[ACC_NEXT_LCSSA:%.*]] = phi i32 [ [[ACC_NEXT_1]], [[LOOP]] ]
+; CHECK-V8-NEXT:    ret i32 [[ACC_NEXT_LCSSA]]
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %count, %loop ]
+  %acc = phi i32 [ 0, %entry ], [ %acc.next, %loop ]
+  %addr.a = getelementptr i32, i32* %a, i32 %iv
+  %addr.b = getelementptr i32, i32* %b, i32 %iv
+  %data.a = load i32, i32* %addr.a
+  %data.b = load i32, i32* %addr.b
+  %ugt = icmp ugt i32 %data.a, %data.b
+  %umax = select i1 %ugt, i32 %data.a, i32 %data.b
+  %acc.next = add i32 %umax, %acc
+  %addr.c = getelementptr i32, i32* %c, i32 %iv
+  store i32 %umax, i32* %addr.c
+  %count = add nuw i32 %iv, 1
+  %end = icmp ne i32 %count, 100
+  br i1 %end, label %loop, label %exit
+
+exit:
+  ret i32 %acc.next
+}
+
+define i32 @test_i32_select_minsize(i32* %a, i32* %b, i32* %c) #1 {
+; CHECK-V8-LABEL: @test_i32_select_minsize(
+; CHECK-V8-NEXT:  entry:
+; CHECK-V8-NEXT:    br label [[LOOP:%.*]]
+; CHECK-V8:       loop:
+; CHECK-V8-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT_1:%.*]], [[LOOP]] ]
+; CHECK-V8-NEXT:    [[ACC:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[ACC_NEXT_1:%.*]], [[LOOP]] ]
+; CHECK-V8-NEXT:    [[ADDR_A:%.*]] = getelementptr i32, i32* [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[ADDR_B:%.*]] = getelementptr i32, i32* [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[DATA_A:%.*]] = load i32, i32* [[ADDR_A]], align 4
+; CHECK-V8-NEXT:    [[DATA_B:%.*]] = load i32, i32* [[ADDR_B]], align 4
+; CHECK-V8-NEXT:    [[UGT:%.*]] = icmp ugt i32 [[DATA_A]], [[DATA_B]]
+; CHECK-V8-NEXT:    [[UMAX:%.*]] = select i1 [[UGT]], i32 [[DATA_A]], i32 [[DATA_B]]
+; CHECK-V8-NEXT:    [[ACC_NEXT:%.*]] = add i32 [[UMAX]], [[ACC]]
+; CHECK-V8-NEXT:    [[ADDR_C:%.*]] = getelementptr i32, i32* [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    store i32 [[UMAX]], i32* [[ADDR_C]], align 4
+; CHECK-V8-NEXT:    [[COUNT:%.*]] = add nuw nsw i32 [[IV]], 1
+; CHECK-V8-NEXT:    [[ADDR_A_1:%.*]] = getelementptr i32, i32* [[A]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    [[ADDR_B_1:%.*]] = getelementptr i32, i32* [[B]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    [[DATA_A_1:%.*]] = load i32, i32* [[ADDR_A_1]], align 4
+; CHECK-V8-NEXT:    [[DATA_B_1:%.*]] = load i32, i32* [[ADDR_B_1]], align 4
+; CHECK-V8-NEXT:    [[UGT_1:%.*]] = icmp ugt i32 [[DATA_A_1]], [[DATA_B_1]]
+; CHECK-V8-NEXT:    [[UMAX_1:%.*]] = select i1 [[UGT_1]], i32 [[DATA_A_1]], i32 [[DATA_B_1]]
+; CHECK-V8-NEXT:    [[ACC_NEXT_1]] = add i32 [[UMAX_1]], [[ACC_NEXT]]
+; CHECK-V8-NEXT:    [[ADDR_C_1:%.*]] = getelementptr i32, i32* [[C]], i32 [[COUNT]]
+; CHECK-V8-NEXT:    store i32 [[UMAX_1]], i32* [[ADDR_C_1]], align 4
+; CHECK-V8-NEXT:    [[COUNT_1]] = add nuw nsw i32 [[COUNT]], 1
+; CHECK-V8-NEXT:    [[END_1:%.*]] = icmp ne i32 [[COUNT_1]], 100
+; CHECK-V8-NEXT:    br i1 [[END_1]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK-V8:       exit:
+; CHECK-V8-NEXT:    [[ACC_NEXT_LCSSA:%.*]] = phi i32 [ [[ACC_NEXT_1]], [[LOOP]] ]
+; CHECK-V8-NEXT:    ret i32 [[ACC_NEXT_LCSSA]]
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %count, %loop ]
+  %acc = phi i32 [ 0, %entry ], [ %acc.next, %loop ]
+  %addr.a = getelementptr i32, i32* %a, i32 %iv
+  %addr.b = getelementptr i32, i32* %b, i32 %iv
+  %data.a = load i32, i32* %addr.a
+  %data.b = load i32, i32* %addr.b
+  %ugt = icmp ugt i32 %data.a, %data.b
+  %umax = select i1 %ugt, i32 %data.a, i32 %data.b
+  %acc.next = add i32 %umax, %acc
+  %addr.c = getelementptr i32, i32* %c, i32 %iv
+  store i32 %umax, i32* %addr.c
+  %count = add nuw i32 %iv, 1
+  %end = icmp ne i32 %count, 100
+  br i1 %end, label %loop, label %exit
+
+exit:
+  ret i32 %acc.next
+}
+
+define i64 @test_i64_select_optsize(i64* %a, i64* %b, i64* %c) #0 {
+; CHECK-V8-LABEL: @test_i64_select_optsize(
+; CHECK-V8-NEXT:  entry:
+; CHECK-V8-NEXT:    br label [[LOOP:%.*]]
+; CHECK-V8:       loop:
+; CHECK-V8-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT:%.*]], [[LOOP]] ]
+; CHECK-V8-NEXT:    [[ACC:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ACC_NEXT:%.*]], [[LOOP]] ]
+; CHECK-V8-NEXT:    [[ADDR_A:%.*]] = getelementptr i64, i64* [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[ADDR_B:%.*]] = getelementptr i64, i64* [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[DATA_A:%.*]] = load i64, i64* [[ADDR_A]], align 4
+; CHECK-V8-NEXT:    [[DATA_B:%.*]] = load i64, i64* [[ADDR_B]], align 4
+; CHECK-V8-NEXT:    [[UGT:%.*]] = icmp ugt i64 [[DATA_A]], [[DATA_B]]
+; CHECK-V8-NEXT:    [[UMAX:%.*]] = select i1 [[UGT]], i64 [[DATA_A]], i64 [[DATA_B]]
+; CHECK-V8-NEXT:    [[ACC_NEXT]] = add i64 [[UMAX]], [[ACC]]
+; CHECK-V8-NEXT:    [[ADDR_C:%.*]] = getelementptr i64, i64* [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    store i64 [[UMAX]], i64* [[ADDR_C]], align 4
+; CHECK-V8-NEXT:    [[COUNT]] = add nuw i32 [[IV]], 1
+; CHECK-V8-NEXT:    [[END:%.*]] = icmp ne i32 [[COUNT]], 100
+; CHECK-V8-NEXT:    br i1 [[END]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK-V8:       exit:
+; CHECK-V8-NEXT:    [[ACC_NEXT_LCSSA:%.*]] = phi i64 [ [[ACC_NEXT]], [[LOOP]] ]
+; CHECK-V8-NEXT:    ret i64 [[ACC_NEXT_LCSSA]]
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %count, %loop ]
+  %acc = phi i64 [ 0, %entry ], [ %acc.next, %loop ]
+  %addr.a = getelementptr i64, i64* %a, i32 %iv
+  %addr.b = getelementptr i64, i64* %b, i32 %iv
+  %data.a = load i64, i64* %addr.a
+  %data.b = load i64, i64* %addr.b
+  %ugt = icmp ugt i64 %data.a, %data.b
+  %umax = select i1 %ugt, i64 %data.a, i64 %data.b
+  %acc.next = add i64 %umax, %acc
+  %addr.c = getelementptr i64, i64* %c, i32 %iv
+  store i64 %umax, i64* %addr.c
+  %count = add nuw i32 %iv, 1
+  %end = icmp ne i32 %count, 100
+  br i1 %end, label %loop, label %exit
+
+exit:
+  ret i64 %acc.next
+}
+
+define i64 @test_i64_select_minsize(i64* %a, i64* %b, i64* %c) #1 {
+; CHECK-V8-LABEL: @test_i64_select_minsize(
+; CHECK-V8-NEXT:  entry:
+; CHECK-V8-NEXT:    br label [[LOOP:%.*]]
+; CHECK-V8:       loop:
+; CHECK-V8-NEXT:    [[IV:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[COUNT:%.*]], [[LOOP]] ]
+; CHECK-V8-NEXT:    [[ACC:%.*]] = phi i64 [ 0, [[ENTRY]] ], [ [[ACC_NEXT:%.*]], [[LOOP]] ]
+; CHECK-V8-NEXT:    [[ADDR_A:%.*]] = getelementptr i64, i64* [[A:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[ADDR_B:%.*]] = getelementptr i64, i64* [[B:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    [[DATA_A:%.*]] = load i64, i64* [[ADDR_A]], align 4
+; CHECK-V8-NEXT:    [[DATA_B:%.*]] = load i64, i64* [[ADDR_B]], align 4
+; CHECK-V8-NEXT:    [[UGT:%.*]] = icmp ugt i64 [[DATA_A]], [[DATA_B]]
+; CHECK-V8-NEXT:    [[UMAX:%.*]] = select i1 [[UGT]], i64 [[DATA_A]], i64 [[DATA_B]]
+; CHECK-V8-NEXT:    [[ACC_NEXT]] = add i64 [[UMAX]], [[ACC]]
+; CHECK-V8-NEXT:    [[ADDR_C:%.*]] = getelementptr i64, i64* [[C:%.*]], i32 [[IV]]
+; CHECK-V8-NEXT:    store i64 [[UMAX]], i64* [[ADDR_C]], align 4
+; CHECK-V8-NEXT:    [[COUNT]] = add nuw i32 [[IV]], 1
+; CHECK-V8-NEXT:    [[END:%.*]] = icmp ne i32 [[COUNT]], 100
+; CHECK-V8-NEXT:    br i1 [[END]], label [[LOOP]], label [[EXIT:%.*]]
+; CHECK-V8:       exit:
+; CHECK-V8-NEXT:    [[ACC_NEXT_LCSSA:%.*]] = phi i64 [ [[ACC_NEXT]], [[LOOP]] ]
+; CHECK-V8-NEXT:    ret i64 [[ACC_NEXT_LCSSA]]
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i32 [ 0, %entry ], [ %count, %loop ]
+  %acc = phi i64 [ 0, %entry ], [ %acc.next, %loop ]
+  %addr.a = getelementptr i64, i64* %a, i32 %iv
+  %addr.b = getelementptr i64, i64* %b, i32 %iv
+  %data.a = load i64, i64* %addr.a
+  %data.b = load i64, i64* %addr.b
+  %ugt = icmp ugt i64 %data.a, %data.b
+  %umax = select i1 %ugt, i64 %data.a, i64 %data.b
+  %acc.next = add i64 %umax, %acc
+  %addr.c = getelementptr i64, i64* %c, i32 %iv
+  store i64 %umax, i64* %addr.c
+  %count = add nuw i32 %iv, 1
+  %end = icmp ne i32 %count, 100
+  br i1 %end, label %loop, label %exit
+
+exit:
+  ret i64 %acc.next
+}
+
+attributes #0 = { optsize }
+attributes #1 = { minsize optsize }
diff --git a/llvm/test/Transforms/LoopUnroll/ARM/unroll-optsize.ll b/llvm/test/Transforms/LoopUnroll/ARM/unroll-optsize.ll
new file mode 100644
index 0000000..1329aa9
--- /dev/null
+++ b/llvm/test/Transforms/LoopUnroll/ARM/unroll-optsize.ll
@@ -0,0 +1,174 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -loop-unroll -mtriple=thumbv7a-unknown-linux-gnueabihf -S %s | FileCheck %s
+
+; Check that we unroll even with optsize when the result is smaller, either
+; because the loop has a single iteration or because the body has
+; constant-folding opportunities after full unrolling.
+
+; TODO: Looks like we should enable some unrolling for M-class, even when
+; optimising for size.
+
+declare i32 @get()
+
+define void @fully_unrolled_single_iteration(i32* %src) #0 {
+; CHECK-LABEL: @fully_unrolled_single_iteration(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARR:%.*]] = alloca [4 x i32], align 4
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[V:%.*]] = load i32, i32* [[SRC:%.*]]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
+; CHECK-NEXT:    store i32 [[V]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
+; CHECK-NEXT:    call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arr = alloca [4 x i32], align 4
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+  %src.idx = getelementptr inbounds i32, i32* %src, i64 %indvars.iv
+  %v = load i32, i32* %src.idx
+  %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
+  store i32 %v, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv.next, 1
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body
+  %ptr = bitcast [4 x i32]* %arr to i32*
+  call void @use(i32* nonnull %ptr) #4
+  ret void
+}
+
+
+define void @fully_unrolled_smaller() #0 {
+; CHECK-LABEL: @fully_unrolled_smaller(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARR:%.*]] = alloca [4 x i32], align 4
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
+; CHECK-NEXT:    store i32 16, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 1
+; CHECK-NEXT:    store i32 4104, i32* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 2
+; CHECK-NEXT:    store i32 1048592, i32* [[ARRAYIDX_2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 3
+; CHECK-NEXT:    store i32 268435480, i32* [[ARRAYIDX_3]], align 4
+; CHECK-NEXT:    [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
+; CHECK-NEXT:    call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arr = alloca [4 x i32], align 4
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+  %indvars.iv.tr = trunc i64 %indvars.iv to i32
+  %shl.0 = shl i32 %indvars.iv.tr, 3
+  %shl.1 = shl i32 16, %shl.0
+  %or = or i32 %shl.1, %shl.0
+  %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
+  store i32 %or, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv, 3
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body
+  %ptr = bitcast [4 x i32]* %arr to i32*
+  call void @use(i32* nonnull %ptr) #4
+  ret void
+}
+
+define void @fully_unrolled_smaller_Oz() #1 {
+; CHECK-LABEL: @fully_unrolled_smaller_Oz(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARR:%.*]] = alloca [4 x i32], align 4
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 0
+; CHECK-NEXT:    store i32 16, i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 1
+; CHECK-NEXT:    store i32 4104, i32* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 2
+; CHECK-NEXT:    store i32 1048592, i32* [[ARRAYIDX_2]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 3
+; CHECK-NEXT:    store i32 268435480, i32* [[ARRAYIDX_3]], align 4
+; CHECK-NEXT:    [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
+; CHECK-NEXT:    call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arr = alloca [4 x i32], align 4
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+  %indvars.iv.tr = trunc i64 %indvars.iv to i32
+  %shl.0 = shl i32 %indvars.iv.tr, 3
+  %shl.1 = shl i32 16, %shl.0
+  %or = or i32 %shl.1, %shl.0
+  %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
+  store i32 %or, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv, 3
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body
+  %ptr = bitcast [4 x i32]* %arr to i32*
+  call void @use(i32* nonnull %ptr) #4
+  ret void
+}
+
+
+define void @fully_unrolled_bigger() #0 {
+; CHECK-LABEL: @fully_unrolled_bigger(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[ARR:%.*]] = alloca [4 x i32], align 4
+; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
+; CHECK:       for.body:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT:    [[INDVARS_IV_TR:%.*]] = trunc i64 [[INDVARS_IV]] to i32
+; CHECK-NEXT:    [[SHL_0:%.*]] = shl i32 [[INDVARS_IV_TR]], 3
+; CHECK-NEXT:    [[SHL_1:%.*]] = shl i32 16, [[SHL_0]]
+; CHECK-NEXT:    [[OR:%.*]] = or i32 [[SHL_1]], [[SHL_0]]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [4 x i32], [4 x i32]* [[ARR]], i64 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT:    store i32 [[OR]], i32* [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV]], 7
+; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_BODY]]
+; CHECK:       for.cond.cleanup:
+; CHECK-NEXT:    [[PTR:%.*]] = bitcast [4 x i32]* [[ARR]] to i32*
+; CHECK-NEXT:    call void @use(i32* nonnull [[PTR]])
+; CHECK-NEXT:    ret void
+;
+entry:
+  %arr = alloca [4 x i32], align 4
+  br label %for.body
+
+for.body:                                         ; preds = %for.body, %entry
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ 0, %entry ]
+  %indvars.iv.tr = trunc i64 %indvars.iv to i32
+  %shl.0 = shl i32 %indvars.iv.tr, 3
+  %shl.1 = shl i32 16, %shl.0
+  %or = or i32 %shl.1, %shl.0
+  %arrayidx = getelementptr inbounds [4 x i32], [4 x i32]* %arr, i64 0, i64 %indvars.iv
+  store i32 %or, i32* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %exitcond = icmp eq i64 %indvars.iv, 7
+  br i1 %exitcond, label %for.cond.cleanup, label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body
+  %ptr = bitcast [4 x i32]* %arr to i32*
+  call void @use(i32* nonnull %ptr) #4
+  ret void
+}
+
+declare void @use(i32*)
+
+attributes #0 = { optsize }
+attributes #1 = { minsize optsize }