Merge "Delete ArtMethod gc_map_ field"
diff --git a/compiler/dex/global_value_numbering.cc b/compiler/dex/global_value_numbering.cc
index 3daeb10..578952b 100644
--- a/compiler/dex/global_value_numbering.cc
+++ b/compiler/dex/global_value_numbering.cc
@@ -104,10 +104,7 @@
     if (bb->catch_entry) {
       merge_type = LocalValueNumbering::kCatchMerge;
     } else if (bb->last_mir_insn != nullptr &&
-        (bb->last_mir_insn->dalvikInsn.opcode == Instruction::RETURN_VOID ||
-         bb->last_mir_insn->dalvikInsn.opcode == Instruction::RETURN ||
-         bb->last_mir_insn->dalvikInsn.opcode == Instruction::RETURN_OBJECT ||
-         bb->last_mir_insn->dalvikInsn.opcode == Instruction::RETURN_WIDE) &&
+        IsInstructionReturn(bb->last_mir_insn->dalvikInsn.opcode) &&
         bb->GetFirstNonPhiInsn() == bb->last_mir_insn) {
       merge_type = LocalValueNumbering::kReturnMerge;
     }
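
For context, the merge-type selection this hunk condenses reads as a three-way classifier. A minimal sketch, assuming the IsInstructionReturn() helper introduced in dex_instruction_utils.h further down and the BasicBlock/LocalValueNumbering types from the surrounding code:

    // Catch entries clobber memory; a block consisting of nothing but a
    // return (ignoring leading Phis) allows the cheaper return merge;
    // every other block gets the normal merge.
    LocalValueNumbering::MergeType ClassifyMerge(BasicBlock* bb) {
      if (bb->catch_entry) {
        return LocalValueNumbering::kCatchMerge;
      }
      if (bb->last_mir_insn != nullptr &&
          IsInstructionReturn(bb->last_mir_insn->dalvikInsn.opcode) &&
          bb->GetFirstNonPhiInsn() == bb->last_mir_insn) {
        return LocalValueNumbering::kReturnMerge;
      }
      return LocalValueNumbering::kNormalMerge;
    }
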
diff --git a/compiler/dex/local_value_numbering.cc b/compiler/dex/local_value_numbering.cc
index c502b0c..e0c4e27 100644
--- a/compiler/dex/local_value_numbering.cc
+++ b/compiler/dex/local_value_numbering.cc
@@ -343,8 +343,8 @@
       merge_names_(allocator->Adapter()),
       merge_map_(std::less<ScopedArenaVector<BasicBlockId>>(), allocator->Adapter()),
       merge_new_memory_version_(kNoValue) {
-  std::fill_n(unresolved_sfield_version_, kFieldTypeCount, 0u);
-  std::fill_n(unresolved_ifield_version_, kFieldTypeCount, 0u);
+  std::fill_n(unresolved_sfield_version_, arraysize(unresolved_sfield_version_), 0u);
+  std::fill_n(unresolved_ifield_version_, arraysize(unresolved_ifield_version_), 0u);
 }
 
 bool LocalValueNumbering::Equals(const LocalValueNumbering& other) const {
@@ -392,16 +392,20 @@
   if (merge_type == kCatchMerge) {
     // Memory is clobbered. Use new memory version and don't merge aliasing locations.
     global_memory_version_ = NewMemoryVersion(&merge_new_memory_version_);
-    std::fill_n(unresolved_sfield_version_, kFieldTypeCount, global_memory_version_);
-    std::fill_n(unresolved_ifield_version_, kFieldTypeCount, global_memory_version_);
+    std::fill_n(unresolved_sfield_version_, arraysize(unresolved_sfield_version_),
+                global_memory_version_);
+    std::fill_n(unresolved_ifield_version_, arraysize(unresolved_ifield_version_),
+                global_memory_version_);
     PruneNonAliasingRefsForCatch();
     return;
   }
 
   DCHECK(merge_type == kNormalMerge);
   global_memory_version_ = other.global_memory_version_;
-  std::copy_n(other.unresolved_ifield_version_, kFieldTypeCount, unresolved_ifield_version_);
-  std::copy_n(other.unresolved_sfield_version_, kFieldTypeCount, unresolved_sfield_version_);
+  std::copy_n(other.unresolved_ifield_version_, arraysize(unresolved_ifield_version_),
+              unresolved_ifield_version_);
+  std::copy_n(other.unresolved_sfield_version_, arraysize(unresolved_sfield_version_),
+              unresolved_sfield_version_);
   sfield_value_map_ = other.sfield_value_map_;
   CopyAliasingValuesMap(&aliasing_ifield_value_map_, other.aliasing_ifield_value_map_);
   CopyAliasingValuesMap(&aliasing_array_value_map_, other.aliasing_array_value_map_);
@@ -413,9 +417,11 @@
 bool LocalValueNumbering::SameMemoryVersion(const LocalValueNumbering& other) const {
   return
       global_memory_version_ == other.global_memory_version_ &&
-      std::equal(unresolved_ifield_version_, unresolved_ifield_version_ + kFieldTypeCount,
+      std::equal(unresolved_ifield_version_,
+                 unresolved_ifield_version_ + arraysize(unresolved_ifield_version_),
                  other.unresolved_ifield_version_) &&
-      std::equal(unresolved_sfield_version_, unresolved_sfield_version_ + kFieldTypeCount,
+      std::equal(unresolved_sfield_version_,
+                 unresolved_sfield_version_ + arraysize(unresolved_sfield_version_),
                  other.unresolved_sfield_version_);
 }
 
@@ -442,18 +448,22 @@
   }
   if (new_global_version) {
     global_memory_version_ = NewMemoryVersion(&merge_new_memory_version_);
-    std::fill_n(unresolved_sfield_version_, kFieldTypeCount, merge_new_memory_version_);
-    std::fill_n(unresolved_ifield_version_, kFieldTypeCount, merge_new_memory_version_);
+    std::fill_n(unresolved_sfield_version_, arraysize(unresolved_sfield_version_),
+                merge_new_memory_version_);
+    std::fill_n(unresolved_ifield_version_, arraysize(unresolved_ifield_version_),
+                merge_new_memory_version_);
   } else {
     // Initialize with a copy of memory versions from the comparison LVN.
     global_memory_version_ = cmp->global_memory_version_;
-    std::copy_n(cmp->unresolved_ifield_version_, kFieldTypeCount, unresolved_ifield_version_);
-    std::copy_n(cmp->unresolved_sfield_version_, kFieldTypeCount, unresolved_sfield_version_);
+    std::copy_n(cmp->unresolved_ifield_version_, arraysize(unresolved_ifield_version_),
+                unresolved_ifield_version_);
+    std::copy_n(cmp->unresolved_sfield_version_, arraysize(unresolved_sfield_version_),
+                unresolved_sfield_version_);
     for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
       if (lvn == cmp) {
         continue;
       }
-      for (size_t i = 0; i != kFieldTypeCount; ++i) {
+      for (size_t i = 0; i != kDexMemAccessTypeCount; ++i) {
         if (lvn->unresolved_ifield_version_[i] != cmp->unresolved_ifield_version_[i]) {
           unresolved_ifield_version_[i] = NewMemoryVersion(&merge_new_memory_version_);
         }
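
The arraysize() change removes a manually maintained count (previously kFieldTypeCount) in favor of deriving the element count from the array declaration itself, so the fill/copy/compare calls can never drift out of sync with the array sizes. ART's arraysize follows the usual compile-time idiom; a sketch of that pattern (not copied from the ART headers):

    #include <cstddef>

    // Declares (never defines) a function returning a reference to a char
    // array of N elements; sizeof() on the call yields N at compile time.
    // Unlike sizeof(a)/sizeof(*a), this refuses to compile for pointers.
    template <typename T, size_t N>
    char (&ArraySizeHelper(T (&array)[N]))[N];
    #define arraysize(array) (sizeof(ArraySizeHelper(array)))

    // Usage mirroring the hunks above:
    //   uint16_t unresolved_sfield_version_[kDexMemAccessTypeCount];
    //   std::fill_n(unresolved_sfield_version_,
    //               arraysize(unresolved_sfield_version_), 0u);
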
diff --git a/compiler/dex/local_value_numbering.h b/compiler/dex/local_value_numbering.h
index 8613f97..9b89c95 100644
--- a/compiler/dex/local_value_numbering.h
+++ b/compiler/dex/local_value_numbering.h
@@ -22,6 +22,7 @@
 #include "compiler_internals.h"
 #include "global_value_numbering.h"
 #include "utils/arena_object.h"
+#include "utils/dex_instruction_utils.h"
 
 namespace art {
 
@@ -76,17 +77,6 @@
   // A set of value names.
   typedef GlobalValueNumbering::ValueNameSet ValueNameSet;
 
-  // Field types correspond to the ordering of GET/PUT instructions; this order is the same
-  // for IGET, IPUT, SGET, SPUT, AGET and APUT:
-  // op         0
-  // op_WIDE    1
-  // op_OBJECT  2
-  // op_BOOLEAN 3
-  // op_BYTE    4
-  // op_CHAR    5
-  // op_SHORT   6
-  static constexpr size_t kFieldTypeCount = 7;
-
   // Key is s_reg, value is value name.
   typedef ScopedArenaSafeMap<uint16_t, uint16_t> SregValueMap;
 
@@ -364,8 +354,8 @@
 
   // Data for dealing with memory clobbering and store/load aliasing.
   uint16_t global_memory_version_;
-  uint16_t unresolved_sfield_version_[kFieldTypeCount];
-  uint16_t unresolved_ifield_version_[kFieldTypeCount];
+  uint16_t unresolved_sfield_version_[kDexMemAccessTypeCount];
+  uint16_t unresolved_ifield_version_[kDexMemAccessTypeCount];
   // Value names of references to objects that cannot be reached through a different value name.
   ValueNameSet non_aliasing_refs_;
   // Previously non-aliasing refs that escaped but can still be used for non-aliasing AGET/IGET.
diff --git a/compiler/dex/quick/dex_file_method_inliner.cc b/compiler/dex/quick/dex_file_method_inliner.cc
index e12d305..3039852 100644
--- a/compiler/dex/quick/dex_file_method_inliner.cc
+++ b/compiler/dex/quick/dex_file_method_inliner.cc
@@ -112,18 +112,18 @@
 uint32_t GetInvokeReg(MIR* invoke, uint32_t arg) {
   DCHECK_LT(arg, invoke->dalvikInsn.vA);
   DCHECK(!MIR::DecodedInstruction::IsPseudoMirOp(invoke->dalvikInsn.opcode));
-  if (Instruction::FormatOf(invoke->dalvikInsn.opcode) == Instruction::k3rc) {
-    return invoke->dalvikInsn.vC + arg;  // Non-range invoke.
+  if (IsInvokeInstructionRange(invoke->dalvikInsn.opcode)) {
+    return invoke->dalvikInsn.vC + arg;  // Range invoke.
   } else {
     DCHECK_EQ(Instruction::FormatOf(invoke->dalvikInsn.opcode), Instruction::k35c);
-    return invoke->dalvikInsn.arg[arg];  // Range invoke.
+    return invoke->dalvikInsn.arg[arg];  // Non-range invoke.
   }
 }
 
 bool WideArgIsInConsecutiveDalvikRegs(MIR* invoke, uint32_t arg) {
   DCHECK_LT(arg + 1, invoke->dalvikInsn.vA);
   DCHECK(!MIR::DecodedInstruction::IsPseudoMirOp(invoke->dalvikInsn.opcode));
-  return Instruction::FormatOf(invoke->dalvikInsn.opcode) == Instruction::k3rc ||
+  return IsInvokeInstructionRange(invoke->dalvikInsn.opcode) ||
       invoke->dalvikInsn.arg[arg + 1u] == invoke->dalvikInsn.arg[arg] + 1u;
 }
 
@@ -573,8 +573,7 @@
     // If the invoke has not been eliminated yet, check now whether we should do it.
     // This is done so that dataflow analysis does not get tripped up seeing nop invoke.
     if (static_cast<int>(invoke->dalvikInsn.opcode) != kMirOpNop) {
-      bool is_static = invoke->dalvikInsn.opcode == Instruction::INVOKE_STATIC ||
-          invoke->dalvikInsn.opcode == Instruction::INVOKE_STATIC_RANGE;
+      bool is_static = IsInstructionInvokeStatic(invoke->dalvikInsn.opcode);
       if (is_static || (invoke->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) {
         // No null object register involved here so we can eliminate the invoke.
         invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
@@ -804,9 +803,7 @@
     return !data.is_volatile;
   }
 
-  DCHECK_EQ(data.method_is_static != 0u,
-            invoke->dalvikInsn.opcode == Instruction::INVOKE_STATIC ||
-            invoke->dalvikInsn.opcode == Instruction::INVOKE_STATIC_RANGE);
+  DCHECK_EQ(data.method_is_static != 0u, IsInstructionInvokeStatic(invoke->dalvikInsn.opcode));
   bool object_is_this = (data.method_is_static == 0u && data.object_arg == 0u);
   if (!object_is_this) {
     // TODO: Implement inlining of IGET on non-"this" registers (needs correct stack trace for NPE).
@@ -865,9 +862,7 @@
     return false;
   }
 
-  DCHECK_EQ(data.method_is_static != 0u,
-            invoke->dalvikInsn.opcode == Instruction::INVOKE_STATIC ||
-            invoke->dalvikInsn.opcode == Instruction::INVOKE_STATIC_RANGE);
+  DCHECK_EQ(data.method_is_static != 0u, IsInstructionInvokeStatic(invoke->dalvikInsn.opcode));
   bool object_is_this = (data.method_is_static == 0u && data.object_arg == 0u);
   if (!object_is_this) {
     // TODO: Implement inlining of IPUT on non-"this" registers (needs correct stack trace for NPE).
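
These inliner hunks all hinge on how invoke arguments map onto Dalvik registers: a range invoke (format 3rc) takes a contiguous register window starting at vC, while a non-range invoke (format 35c) lists up to five registers individually in arg[]. A standalone sketch of that mapping, assuming the helpers and DecodedInstruction layout used above:

    // Returns the Dalvik register holding argument `arg` of an invoke.
    uint32_t GetInvokeRegSketch(const MIR* invoke, uint32_t arg) {
      DCHECK_LT(arg, invoke->dalvikInsn.vA);  // vA is the argument count.
      if (IsInvokeInstructionRange(invoke->dalvikInsn.opcode)) {
        // Range invoke (3rc): arguments live in vC, vC+1, ..., vC+vA-1.
        return invoke->dalvikInsn.vC + arg;
      }
      // Non-range invoke (35c): registers are listed one by one.
      return invoke->dalvikInsn.arg[arg];
    }

This is also why WideArgIsInConsecutiveDalvikRegs() can return true unconditionally for range invokes: the register window is contiguous by construction.
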
diff --git a/compiler/dex/vreg_analysis.cc b/compiler/dex/vreg_analysis.cc
index f6c7d52..a541c7d 100644
--- a/compiler/dex/vreg_analysis.cc
+++ b/compiler/dex/vreg_analysis.cc
@@ -276,8 +276,7 @@
       }
       int num_uses = mir->dalvikInsn.vA;
       // If this is a non-static invoke, mark implicit "this"
-      if (((mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC) &&
-          (mir->dalvikInsn.opcode != Instruction::INVOKE_STATIC_RANGE))) {
+      if (!IsInstructionInvokeStatic(mir->dalvikInsn.opcode)) {
         reg_location_[uses[next]].defined = true;
         reg_location_[uses[next]].ref = true;
         type_mismatch |= reg_location_[uses[next]].wide;
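
IsInstructionInvokeStatic() replaces the open-coded pair of comparisons in both files; presumably it is the obvious two-opcode predicate (a sketch, assuming it lives alongside the other helpers in dex_instruction_utils.h):

    constexpr bool IsInstructionInvokeStatic(Instruction::Code opcode) {
      return opcode == Instruction::INVOKE_STATIC ||
             opcode == Instruction::INVOKE_STATIC_RANGE;
    }

Static invokes have no implicit "this", which is exactly the case vreg_analysis.cc needs to exclude before marking the first use as a reference.
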
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 6dd4207..c49cf7e 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -50,10 +50,9 @@
   SsaLivenessAnalysis liveness(*graph, &codegen);
   liveness.Analyze();
 
-  ASSERT_EQ(liveness.GetLinearPostOrder().Size(), number_of_blocks);
+  ASSERT_EQ(liveness.GetLinearOrder().Size(), number_of_blocks);
   for (size_t i = 0; i < number_of_blocks; ++i) {
-    ASSERT_EQ(liveness.GetLinearPostOrder().Get(number_of_blocks - i - 1)->GetBlockId(),
-              expected_order[i]);
+    ASSERT_EQ(liveness.GetLinearOrder().Get(i)->GetBlockId(), expected_order[i]);
   }
 }
 
@@ -194,4 +193,58 @@
   TestCode(data, blocks, 12);
 }
 
+TEST(LinearizeTest, CFG6) {
+  //            Block0
+  //              |
+  //            Block1
+  //              |
+  //            Block2 ++++++++++++++
+  //              |                 +
+  //            Block3              +
+  //            /     \             +
+  //       Block8     Block4        +
+  //         |         /   \        +
+  //       Block5 <- Block9 Block6  +
+  //         |
+  //       Block7
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::GOTO | 0x0100,
+    Instruction::IF_EQ, 0x0004,
+    Instruction::IF_EQ, 0x0003,
+    Instruction::RETURN_VOID,
+    Instruction::GOTO | 0xFA00);
+
+  const int blocks[] = {0, 1, 2, 3, 4, 6, 9, 8, 5, 7};
+  TestCode(data, blocks, arraysize(blocks));
+}
+
+TEST(LinearizeTest, CFG7) {
+  // Structure of this graph (+ are back edges)
+  //            Block0
+  //              |
+  //            Block1
+  //              |
+  //            Block2 ++++++++
+  //              |           +
+  //            Block3        +
+  //            /    \        +
+  //        Block4  Block8    +
+  //        /  \        |     +
+  //   Block5 Block9 - Block6 +
+  //     |
+  //   Block7
+  //
+  const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::GOTO | 0x0100,
+    Instruction::IF_EQ, 0x0005,
+    Instruction::IF_EQ, 0x0003,
+    Instruction::RETURN_VOID,
+    Instruction::GOTO | 0xFA00);
+
+  const int blocks[] = {0, 1, 2, 3, 4, 9, 8, 6, 5, 7};
+  TestCode(data, blocks, arraysize(blocks));
+}
+
 }  // namespace art
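
The expected orders in CFG6 and CFG7 encode the invariants the new LinearizeGraph() below guarantees: all blocks of a loop appear consecutively, with the back-edge block last before the loop is left. A minimal, self-contained checker for the consecutiveness property (hypothetical helper, not part of the test; assumes non-nested loops and a block-to-loop-id map):

    #include <map>
    #include <set>
    #include <vector>

    // Returns true if, for every loop id, the blocks of that loop form one
    // contiguous run in `order`. `loop_of` maps block id -> loop id; blocks
    // absent from the map belong to no loop.
    bool LoopBlocksAreConsecutive(const std::vector<int>& order,
                                  const std::map<int, int>& loop_of) {
      std::set<int> closed_loops;  // Loops whose run has already ended.
      int current_loop = -1;
      for (int block : order) {
        auto it = loop_of.find(block);
        int loop = (it == loop_of.end()) ? -1 : it->second;
        if (loop != current_loop) {
          if (closed_loops.count(loop) != 0) {
            return false;  // This loop's blocks appear in two separate runs.
          }
          if (current_loop != -1) {
            closed_loops.insert(current_loop);
          }
          current_loop = loop;
        }
      }
      return true;
    }

Reading CFG6 this way, the loop blocks {2, 3, 4, 6} form one run in the expected order {0, 1, 2, 3, 4, 6, 9, 8, 5, 7}, with back-edge block 6 last.
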
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index b47549a..f562113 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -236,7 +236,7 @@
     return false;
   }
 
-  int NumberOfBackEdges() const {
+  size_t NumberOfBackEdges() const {
     return back_edges_.Size();
   }
 
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 0085b27..660a5c5 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -28,11 +28,6 @@
   ComputeLiveness();
 }
 
-static bool IsLoopExit(HLoopInformation* current, HLoopInformation* to) {
-  // `to` is either not part of a loop, or `current` is an inner loop of `to`.
-  return to == nullptr || (current != to && current->IsIn(*to));
-}
-
 static bool IsLoop(HLoopInformation* info) {
   return info != nullptr;
 }
@@ -48,46 +43,64 @@
       && inner->IsIn(*outer);
 }
 
-static void VisitBlockForLinearization(HBasicBlock* block,
-                                       GrowableArray<HBasicBlock*>* order,
-                                       ArenaBitVector* visited) {
-  if (visited->IsBitSet(block->GetBlockId())) {
-    return;
-  }
-  visited->SetBit(block->GetBlockId());
-  size_t number_of_successors = block->GetSuccessors().Size();
-  if (number_of_successors == 0) {
-    // Nothing to do.
-  } else if (number_of_successors == 1) {
-    VisitBlockForLinearization(block->GetSuccessors().Get(0), order, visited);
-  } else {
-    DCHECK_EQ(number_of_successors, 2u);
-    HBasicBlock* first_successor = block->GetSuccessors().Get(0);
-    HBasicBlock* second_successor = block->GetSuccessors().Get(1);
-    HLoopInformation* my_loop = block->GetLoopInformation();
-    HLoopInformation* first_loop = first_successor->GetLoopInformation();
-    HLoopInformation* second_loop = second_successor->GetLoopInformation();
-
-    if (!IsLoop(my_loop)) {
-      // Nothing to do. Current order is fine.
-    } else if (IsLoopExit(my_loop, second_loop) && InSameLoop(my_loop, first_loop)) {
-      // Visit the loop exit first in post order.
-      std::swap(first_successor, second_successor);
-    } else if (IsInnerLoop(my_loop, first_loop) && !IsInnerLoop(my_loop, second_loop)) {
-      // Visit the inner loop last in post order.
-      std::swap(first_successor, second_successor);
+static void AddToListForLinearization(GrowableArray<HBasicBlock*>* worklist, HBasicBlock* block) {
+  size_t insert_at = worklist->Size();
+  HLoopInformation* block_loop = block->GetLoopInformation();
+  for (; insert_at > 0; --insert_at) {
+    HBasicBlock* current = worklist->Get(insert_at - 1);
+    HLoopInformation* current_loop = current->GetLoopInformation();
+    if (InSameLoop(block_loop, current_loop)
+        || !IsLoop(current_loop)
+        || IsInnerLoop(current_loop, block_loop)) {
+      // The block can be processed immediately.
+      break;
     }
-    VisitBlockForLinearization(first_successor, order, visited);
-    VisitBlockForLinearization(second_successor, order, visited);
   }
-  order->Add(block);
+  worklist->InsertAt(insert_at, block);
 }
 
 void SsaLivenessAnalysis::LinearizeGraph() {
-  // For simplicity of the implementation, we create post linear order. The order for
-  // computing live ranges is the reverse of that order.
-  ArenaBitVector visited(graph_.GetArena(), graph_.GetBlocks().Size(), false);
-  VisitBlockForLinearization(graph_.GetEntryBlock(), &linear_post_order_, &visited);
+  // Create a reverse post ordering with the following properties:
+  // - Blocks in a loop are consecutive,
+  // - The back-edge block is the last block of a loop, right before its exits.
+
+  // (1): Record the number of forward predecessors for each block. This is to
+  //      ensure the resulting order is reverse post order. We could use the
+  //      current reverse post order in the graph, but it would require making
+  //      order queries to a GrowableArray, which is not the best data structure
+  //      for it.
+  GrowableArray<uint32_t> forward_predecessors(graph_.GetArena(), graph_.GetBlocks().Size());
+  forward_predecessors.SetSize(graph_.GetBlocks().Size());
+  for (size_t i = 0, e = graph_.GetBlocks().Size(); i < e; ++i) {
+    HBasicBlock* block = graph_.GetBlocks().Get(i);
+    size_t number_of_forward_predecessors = block->GetPredecessors().Size();
+    if (block->IsLoopHeader()) {
+      // We rely on having simplified the CFG.
+      DCHECK_EQ(1u, block->GetLoopInformation()->NumberOfBackEdges());
+      number_of_forward_predecessors--;
+    }
+    forward_predecessors.Put(block->GetBlockId(), number_of_forward_predecessors);
+  }
+
+  // (2): Following a worklist approach, start with the entry block and
+  //      iterate over its successors. Once all the forward (non-back-edge)
+  //      predecessors of a successor block have been visited, that successor
+  //      is inserted into the worklist at a position satisfying the properties above.
+  GrowableArray<HBasicBlock*> worklist(graph_.GetArena(), 1);
+  worklist.Add(graph_.GetEntryBlock());
+  do {
+    HBasicBlock* current = worklist.Pop();
+    linear_order_.Add(current);
+    for (size_t i = 0, e = current->GetSuccessors().Size(); i < e; ++i) {
+      HBasicBlock* successor = current->GetSuccessors().Get(i);
+      int block_id = successor->GetBlockId();
+      size_t number_of_remaining_predecessors = forward_predecessors.Get(block_id);
+      if (number_of_remaining_predecessors == 1) {
+        AddToListForLinearization(&worklist, successor);
+      }
+      forward_predecessors.Put(block_id, number_of_remaining_predecessors - 1);
+    }
+  } while (!worklist.IsEmpty());
 }
 
 void SsaLivenessAnalysis::NumberInstructions() {
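
The replacement algorithm is a Kahn-style topological sort over forward edges, where the insertion position in the worklist, rather than a plain stack push, is what enforces the two loop properties. A self-contained sketch of the same scheme with illustrative stand-in types (ART keys on HLoopInformation; Block and LoopInfo here are hypothetical):

    #include <cstddef>
    #include <vector>

    struct LoopInfo {
      LoopInfo* outer;  // Enclosing loop, or nullptr.
      // True if this loop is `other` or nested (transitively) inside it.
      bool IsIn(const LoopInfo& other) const {
        for (const LoopInfo* l = this; l != nullptr; l = l->outer) {
          if (l == &other) return true;
        }
        return false;
      }
    };

    struct Block {
      int id;
      LoopInfo* loop;  // Innermost containing loop, or nullptr.
    };

    static bool IsLoop(LoopInfo* info) { return info != nullptr; }
    static bool InSameLoop(LoopInfo* a, LoopInfo* b) { return a == b; }
    static bool IsInnerLoop(LoopInfo* inner, LoopInfo* outer) {
      return inner != outer && inner != nullptr && outer != nullptr &&
             inner->IsIn(*outer);
    }

    // Mirrors AddToListForLinearization() above: scan from the top of the
    // worklist (back of the vector) and sink `block` below any entry whose
    // loop still has pending work, so a loop's blocks come out consecutively.
    void AddToWorklist(std::vector<Block*>* worklist, Block* block) {
      size_t insert_at = worklist->size();
      for (; insert_at > 0; --insert_at) {
        Block* current = (*worklist)[insert_at - 1];
        if (InSameLoop(block->loop, current->loop) ||
            !IsLoop(current->loop) ||
            IsInnerLoop(current->loop, block->loop)) {
          break;  // `block` may be processed right after `current`.
        }
      }
      worklist->insert(worklist->begin() + insert_at, block);
    }

Popping from the back of the worklist then yields the linear order, exactly as in the do/while loop above.
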
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index ca08d5b..2312389 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -582,7 +582,7 @@
   SsaLivenessAnalysis(const HGraph& graph, CodeGenerator* codegen)
       : graph_(graph),
         codegen_(codegen),
-        linear_post_order_(graph.GetArena(), graph.GetBlocks().Size()),
+        linear_order_(graph.GetArena(), graph.GetBlocks().Size()),
         block_infos_(graph.GetArena(), graph.GetBlocks().Size()),
         instructions_from_ssa_index_(graph.GetArena(), 0),
         instructions_from_lifetime_position_(graph.GetArena(), 0),
@@ -604,8 +604,8 @@
     return &block_infos_.Get(block.GetBlockId())->kill_;
   }
 
-  const GrowableArray<HBasicBlock*>& GetLinearPostOrder() const {
-    return linear_post_order_;
+  const GrowableArray<HBasicBlock*>& GetLinearOrder() const {
+    return linear_order_;
   }
 
   HInstruction* GetInstructionFromSsaIndex(size_t index) const {
@@ -661,7 +661,7 @@
 
   const HGraph& graph_;
   CodeGenerator* const codegen_;
-  GrowableArray<HBasicBlock*> linear_post_order_;
+  GrowableArray<HBasicBlock*> linear_order_;
   GrowableArray<BlockInfo*> block_infos_;
 
   // Temporary array used when computing live_in, live_out, and kill sets.
@@ -674,38 +674,43 @@
   DISALLOW_COPY_AND_ASSIGN(SsaLivenessAnalysis);
 };
 
-class HLinearOrderIterator : public ValueObject {
- public:
-  explicit HLinearOrderIterator(const SsaLivenessAnalysis& liveness)
-      : post_order_(liveness.GetLinearPostOrder()), index_(liveness.GetLinearPostOrder().Size()) {}
-
-  bool Done() const { return index_ == 0; }
-  HBasicBlock* Current() const { return post_order_.Get(index_ -1); }
-  void Advance() { --index_; DCHECK_GE(index_, 0U); }
-
- private:
-  const GrowableArray<HBasicBlock*>& post_order_;
-  size_t index_;
-
-  DISALLOW_COPY_AND_ASSIGN(HLinearOrderIterator);
-};
-
 class HLinearPostOrderIterator : public ValueObject {
  public:
   explicit HLinearPostOrderIterator(const SsaLivenessAnalysis& liveness)
-      : post_order_(liveness.GetLinearPostOrder()), index_(0) {}
+      : order_(liveness.GetLinearOrder()), index_(liveness.GetLinearOrder().Size()) {}
 
-  bool Done() const { return index_ == post_order_.Size(); }
-  HBasicBlock* Current() const { return post_order_.Get(index_); }
-  void Advance() { ++index_; }
+  bool Done() const { return index_ == 0; }
+
+  HBasicBlock* Current() const { return order_.Get(index_ - 1); }
+
+  void Advance() {
+    --index_;
+    DCHECK_GE(index_, 0U);
+  }
 
  private:
-  const GrowableArray<HBasicBlock*>& post_order_;
+  const GrowableArray<HBasicBlock*>& order_;
   size_t index_;
 
   DISALLOW_COPY_AND_ASSIGN(HLinearPostOrderIterator);
 };
 
+class HLinearOrderIterator : public ValueObject {
+ public:
+  explicit HLinearOrderIterator(const SsaLivenessAnalysis& liveness)
+      : order_(liveness.GetLinearOrder()), index_(0) {}
+
+  bool Done() const { return index_ == order_.Size(); }
+  HBasicBlock* Current() const { return order_.Get(index_); }
+  void Advance() { ++index_; }
+
+ private:
+  const GrowableArray<HBasicBlock*>& order_;
+  size_t index_;
+
+  DISALLOW_COPY_AND_ASSIGN(HLinearOrderIterator);
+};
+
 }  // namespace art
 
 #endif  // ART_COMPILER_OPTIMIZING_SSA_LIVENESS_ANALYSIS_H_
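
After the rename, both iterators walk the same linear_order_ array, just from opposite ends: the liveness fixed point consumes the blocks backwards (the post-order iterator) while instruction numbering consumes them forwards. A hedged usage sketch, assuming an already-populated analysis:

    // Forward over the linear order, e.g. to assign lifetime positions.
    for (HLinearOrderIterator it(liveness); !it.Done(); it.Advance()) {
      HBasicBlock* block = it.Current();
      // ... number the instructions of `block` ...
    }

    // Backward over the same order, e.g. for the live_in/live_out fixed point.
    for (HLinearPostOrderIterator it(liveness); !it.Done(); it.Advance()) {
      HBasicBlock* block = it.Current();
      // ... recompute liveness sets for `block` ...
    }
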
diff --git a/compiler/utils/dex_instruction_utils.h b/compiler/utils/dex_instruction_utils.h
index ad7d750..2c6e525 100644
--- a/compiler/utils/dex_instruction_utils.h
+++ b/compiler/utils/dex_instruction_utils.h
@@ -49,6 +49,10 @@
 
 // NOTE: The following functions disregard quickened instructions.
 
+constexpr bool IsInstructionReturn(Instruction::Code opcode) {
+  return Instruction::RETURN_VOID <= opcode && opcode <= Instruction::RETURN_OBJECT;
+}
+
 constexpr bool IsInstructionInvoke(Instruction::Code opcode) {
   return Instruction::INVOKE_VIRTUAL <= opcode && opcode <= Instruction::INVOKE_INTERFACE_RANGE &&
       opcode != Instruction::RETURN_VOID_BARRIER;
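
IsInstructionReturn() leans on the Dex opcode numbering, where the four return opcodes are contiguous: RETURN_VOID (0x0e), RETURN (0x0f), RETURN_WIDE (0x10), RETURN_OBJECT (0x11). The single range comparison is therefore equivalent to the four explicit comparisons removed from global_value_numbering.cc. A sketch of how that layout assumption could be pinned down at compile time (the static_asserts are illustrative, not part of the patch):

    static_assert(Instruction::RETURN_VOID + 1 == Instruction::RETURN,
                  "unexpected dex opcode order");
    static_assert(Instruction::RETURN + 1 == Instruction::RETURN_WIDE,
                  "unexpected dex opcode order");
    static_assert(Instruction::RETURN_WIDE + 1 == Instruction::RETURN_OBJECT,
                  "unexpected dex opcode order");
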
diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk
index 29da2f6..47d186a 100644
--- a/test/Android.run-test.mk
+++ b/test/Android.run-test.mk
@@ -306,6 +306,7 @@
   010-instance \
   012-math \
   023-many-interfaces \
+  027-arithmetic \
   037-inherit \
   044-proxy \
   045-reflect-array \
@@ -329,6 +330,7 @@
   427-bounds \
   428-optimizing-arith-rem \
   430-live-register-slow-path \
+  431-optimizing-arith-shifts \
   701-easy-div-rem \
   800-smali \