blob: 320f01a727fc2abffa0e601dea9a199c05680e3e [file] [log] [blame]
Alexandre Rames22aa54b2016-10-18 09:32:29 +01001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include <string>
18
19#include "prepare_for_register_allocation.h"
20#include "scheduler.h"
21
22#ifdef ART_ENABLE_CODEGEN_arm64
23#include "scheduler_arm64.h"
24#endif
25
xueliang.zhongf7caf682017-03-01 16:07:02 +000026#ifdef ART_ENABLE_CODEGEN_arm
27#include "scheduler_arm.h"
28#endif
29
Alexandre Rames22aa54b2016-10-18 09:32:29 +010030namespace art {
31
32void SchedulingGraph::AddDependency(SchedulingNode* node,
33 SchedulingNode* dependency,
34 bool is_data_dependency) {
35 if (node == nullptr || dependency == nullptr) {
36 // A `nullptr` node indicates an instruction out of scheduling range (eg. in
37 // an other block), so we do not need to add a dependency edge to the graph.
38 return;
39 }
40
41 if (is_data_dependency) {
42 if (!HasImmediateDataDependency(node, dependency)) {
43 node->AddDataPredecessor(dependency);
44 }
45 } else if (!HasImmediateOtherDependency(node, dependency)) {
46 node->AddOtherPredecessor(dependency);
47 }
48}
49
50static bool MayHaveReorderingDependency(SideEffects node, SideEffects other) {
51 // Read after write.
52 if (node.MayDependOn(other)) {
53 return true;
54 }
55
56 // Write after read.
57 if (other.MayDependOn(node)) {
58 return true;
59 }
60
61 // Memory write after write.
62 if (node.DoesAnyWrite() && other.DoesAnyWrite()) {
63 return true;
64 }
65
66 return false;
67}
68
69
70// Check whether `node` depends on `other`, taking into account `SideEffect`
71// information and `CanThrow` information.
72static bool HasSideEffectDependency(const HInstruction* node, const HInstruction* other) {
73 if (MayHaveReorderingDependency(node->GetSideEffects(), other->GetSideEffects())) {
74 return true;
75 }
76
77 if (other->CanThrow() && node->GetSideEffects().DoesAnyWrite()) {
78 return true;
79 }
80
81 if (other->GetSideEffects().DoesAnyWrite() && node->CanThrow()) {
82 return true;
83 }
84
85 if (other->CanThrow() && node->CanThrow()) {
86 return true;
87 }
88
89 // Check side-effect dependency between ArrayGet and BoundsCheck.
90 if (node->IsArrayGet() && other->IsBoundsCheck() && node->InputAt(1) == other) {
91 return true;
92 }
93
94 return false;
95}
96
// Add every dependency edge for `instruction` to the graph: define-use (data)
// dependencies, scheduling-barrier ordering, side-effect ordering, and
// environment-use ordering. Only instructions after `instruction` in the block
// are scanned; earlier instructions get their edges when they are processed.
void SchedulingGraph::AddDependencies(HInstruction* instruction, bool is_scheduling_barrier) {
  SchedulingNode* instruction_node = GetNode(instruction);

  // Define-use dependencies: every user of this instruction's value depends on it.
  for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
    AddDataDependency(GetNode(use.GetUser()), instruction_node);
  }

  // Scheduling barrier dependencies.
  DCHECK(!is_scheduling_barrier || contains_scheduling_barrier_);
  if (contains_scheduling_barrier_) {
    // A barrier depends on instructions after it. And instructions before the
    // barrier depend on it.
    for (HInstruction* other = instruction->GetNext(); other != nullptr; other = other->GetNext()) {
      SchedulingNode* other_node = GetNode(other);
      bool other_is_barrier = other_node->IsSchedulingBarrier();
      if (is_scheduling_barrier || other_is_barrier) {
        AddOtherDependency(other_node, instruction_node);
      }
      if (other_is_barrier) {
        // This other scheduling barrier guarantees ordering of instructions after
        // it, so avoid creating additional useless dependencies in the graph.
        // For example if we have
        //     instr_1
        //     barrier_2
        //     instr_3
        //     barrier_4
        //     instr_5
        // we only create the following non-data dependencies
        //     1 -> 2
        //     2 -> 3
        //     2 -> 4
        //     3 -> 4
        //     4 -> 5
        // and do not create
        //     1 -> 4
        //     2 -> 5
        // Note that in this example we could also avoid creating the dependency
        // `2 -> 4`. But if we remove `instr_3` that dependency is required to
        // order the barriers. So we generate it to avoid a special case.
        break;
      }
    }
  }

  // Side effect dependencies: only needed if this instruction writes/reads
  // memory or can throw.
  if (!instruction->GetSideEffects().DoesNothing() || instruction->CanThrow()) {
    for (HInstruction* other = instruction->GetNext(); other != nullptr; other = other->GetNext()) {
      SchedulingNode* other_node = GetNode(other);
      if (other_node->IsSchedulingBarrier()) {
        // We have reached a scheduling barrier so we can stop further
        // processing.
        DCHECK(HasImmediateOtherDependency(other_node, instruction_node));
        break;
      }
      if (HasSideEffectDependency(other, instruction)) {
        AddOtherDependency(other_node, instruction_node);
      }
    }
  }

  // Environment dependencies.
  // We do not need to process those if the instruction is a scheduling barrier,
  // since the barrier already has non-data dependencies on all following
  // instructions.
  if (!is_scheduling_barrier) {
    for (const HUseListNode<HEnvironment*>& use : instruction->GetEnvUses()) {
      // Note that here we could stop processing if the environment holder is
      // across a scheduling barrier. But checking this would likely require
      // more work than simply iterating through environment uses.
      AddOtherDependency(GetNode(use.GetUser()->GetHolder()), instruction_node);
    }
  }
}
171
172bool SchedulingGraph::HasImmediateDataDependency(const SchedulingNode* node,
173 const SchedulingNode* other) const {
174 return ContainsElement(node->GetDataPredecessors(), other);
175}
176
177bool SchedulingGraph::HasImmediateDataDependency(const HInstruction* instruction,
178 const HInstruction* other_instruction) const {
179 const SchedulingNode* node = GetNode(instruction);
180 const SchedulingNode* other = GetNode(other_instruction);
181 if (node == nullptr || other == nullptr) {
182 // Both instructions must be in current basic block, i.e. the SchedulingGraph can see their
183 // corresponding SchedulingNode in the graph, and tell whether there is a dependency.
184 // Otherwise there is no dependency from SchedulingGraph's perspective, for example,
185 // instruction and other_instruction are in different basic blocks.
186 return false;
187 }
188 return HasImmediateDataDependency(node, other);
189}
190
191bool SchedulingGraph::HasImmediateOtherDependency(const SchedulingNode* node,
192 const SchedulingNode* other) const {
193 return ContainsElement(node->GetOtherPredecessors(), other);
194}
195
196bool SchedulingGraph::HasImmediateOtherDependency(const HInstruction* instruction,
197 const HInstruction* other_instruction) const {
198 const SchedulingNode* node = GetNode(instruction);
199 const SchedulingNode* other = GetNode(other_instruction);
200 if (node == nullptr || other == nullptr) {
201 // Both instructions must be in current basic block, i.e. the SchedulingGraph can see their
202 // corresponding SchedulingNode in the graph, and tell whether there is a dependency.
203 // Otherwise there is no dependency from SchedulingGraph's perspective, for example,
204 // instruction and other_instruction are in different basic blocks.
205 return false;
206 }
207 return HasImmediateOtherDependency(node, other);
208}
209
210static const std::string InstructionTypeId(const HInstruction* instruction) {
211 std::string id;
212 Primitive::Type type = instruction->GetType();
213 if (type == Primitive::kPrimNot) {
214 id.append("l");
215 } else {
216 id.append(Primitive::Descriptor(instruction->GetType()));
217 }
218 // Use lower-case to be closer to the `HGraphVisualizer` output.
219 id[0] = std::tolower(id[0]);
220 id.append(std::to_string(instruction->GetId()));
221 return id;
222}
223
224// Ideally we would reuse the graph visualizer code, but it is not available
225// from here and it is not worth moving all that code only for our use.
226static void DumpAsDotNode(std::ostream& output, const SchedulingNode* node) {
227 const HInstruction* instruction = node->GetInstruction();
228 // Use the instruction typed id as the node identifier.
229 std::string instruction_id = InstructionTypeId(instruction);
230 output << instruction_id << "[shape=record, label=\""
231 << instruction_id << ' ' << instruction->DebugName() << " [";
232 // List the instruction's inputs in its description. When visualizing the
233 // graph this helps differentiating data inputs from other dependencies.
234 const char* seperator = "";
235 for (const HInstruction* input : instruction->GetInputs()) {
236 output << seperator << InstructionTypeId(input);
237 seperator = ",";
238 }
239 output << "]";
240 // Other properties of the node.
241 output << "\\ninternal_latency: " << node->GetInternalLatency();
242 output << "\\ncritical_path: " << node->GetCriticalPath();
243 if (node->IsSchedulingBarrier()) {
244 output << "\\n(barrier)";
245 }
246 output << "\"];\n";
247 // We want program order to go from top to bottom in the graph output, so we
248 // reverse the edges and specify `dir=back`.
249 for (const SchedulingNode* predecessor : node->GetDataPredecessors()) {
250 const HInstruction* predecessor_instruction = predecessor->GetInstruction();
251 output << InstructionTypeId(predecessor_instruction) << ":s -> " << instruction_id << ":n "
252 << "[label=\"" << predecessor->GetLatency() << "\",dir=back]\n";
253 }
254 for (const SchedulingNode* predecessor : node->GetOtherPredecessors()) {
255 const HInstruction* predecessor_instruction = predecessor->GetInstruction();
256 output << InstructionTypeId(predecessor_instruction) << ":s -> " << instruction_id << ":n "
257 << "[dir=back,color=blue]\n";
258 }
259}
260
261void SchedulingGraph::DumpAsDotGraph(const std::string& description,
262 const ArenaVector<SchedulingNode*>& initial_candidates) {
263 // TODO(xueliang): ideally we should move scheduling information into HInstruction, after that
264 // we should move this dotty graph dump feature to visualizer, and have a compiler option for it.
265 std::ofstream output("scheduling_graphs.dot", std::ofstream::out | std::ofstream::app);
266 // Description of this graph, as a comment.
267 output << "// " << description << "\n";
268 // Start the dot graph. Use an increasing index for easier differentiation.
269 output << "digraph G {\n";
270 for (const auto& entry : nodes_map_) {
Vladimir Marko7d157fc2017-05-10 16:29:23 +0100271 SchedulingNode* node = entry.second;
272 DumpAsDotNode(output, node);
Alexandre Rames22aa54b2016-10-18 09:32:29 +0100273 }
274 // Create a fake 'end_of_scheduling' node to help visualization of critical_paths.
Vladimir Marko7d157fc2017-05-10 16:29:23 +0100275 for (SchedulingNode* node : initial_candidates) {
Alexandre Rames22aa54b2016-10-18 09:32:29 +0100276 const HInstruction* instruction = node->GetInstruction();
277 output << InstructionTypeId(instruction) << ":s -> end_of_scheduling:n "
278 << "[label=\"" << node->GetLatency() << "\",dir=back]\n";
279 }
280 // End of the dot graph.
281 output << "}\n";
282 output.close();
283}
284
285SchedulingNode* CriticalPathSchedulingNodeSelector::SelectMaterializedCondition(
286 ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) const {
287 // Schedule condition inputs that can be materialized immediately before their use.
288 // In following example, after we've scheduled HSelect, we want LessThan to be scheduled
289 // immediately, because it is a materialized condition, and will be emitted right before HSelect
290 // in codegen phase.
291 //
292 // i20 HLessThan [...] HLessThan HAdd HAdd
293 // i21 HAdd [...] ===> | | |
294 // i22 HAdd [...] +----------+---------+
295 // i23 HSelect [i21, i22, i20] HSelect
296
297 if (prev_select_ == nullptr) {
298 return nullptr;
299 }
300
301 const HInstruction* instruction = prev_select_->GetInstruction();
302 const HCondition* condition = nullptr;
303 DCHECK(instruction != nullptr);
304
305 if (instruction->IsIf()) {
306 condition = instruction->AsIf()->InputAt(0)->AsCondition();
307 } else if (instruction->IsSelect()) {
308 condition = instruction->AsSelect()->GetCondition()->AsCondition();
309 }
310
311 SchedulingNode* condition_node = (condition != nullptr) ? graph.GetNode(condition) : nullptr;
312
313 if ((condition_node != nullptr) &&
314 condition->HasOnlyOneNonEnvironmentUse() &&
315 ContainsElement(*nodes, condition_node)) {
316 DCHECK(!condition_node->HasUnscheduledSuccessors());
317 // Remove the condition from the list of candidates and schedule it.
318 RemoveElement(*nodes, condition_node);
319 return condition_node;
320 }
321
322 return nullptr;
323}
324
325SchedulingNode* CriticalPathSchedulingNodeSelector::PopHighestPriorityNode(
326 ArenaVector<SchedulingNode*>* nodes, const SchedulingGraph& graph) {
327 DCHECK(!nodes->empty());
328 SchedulingNode* select_node = nullptr;
329
330 // Optimize for materialized condition and its emit before use scenario.
331 select_node = SelectMaterializedCondition(nodes, graph);
332
333 if (select_node == nullptr) {
334 // Get highest priority node based on critical path information.
335 select_node = (*nodes)[0];
336 size_t select = 0;
337 for (size_t i = 1, e = nodes->size(); i < e; i++) {
338 SchedulingNode* check = (*nodes)[i];
339 SchedulingNode* candidate = (*nodes)[select];
340 select_node = GetHigherPrioritySchedulingNode(candidate, check);
341 if (select_node == check) {
342 select = i;
343 }
344 }
345 DeleteNodeAtIndex(nodes, select);
346 }
347
348 prev_select_ = select_node;
349 return select_node;
350}
351
352SchedulingNode* CriticalPathSchedulingNodeSelector::GetHigherPrioritySchedulingNode(
353 SchedulingNode* candidate, SchedulingNode* check) const {
354 uint32_t candidate_path = candidate->GetCriticalPath();
355 uint32_t check_path = check->GetCriticalPath();
356 // First look at the critical_path.
357 if (check_path != candidate_path) {
358 return check_path < candidate_path ? check : candidate;
359 }
360 // If both critical paths are equal, schedule instructions with a higher latency
361 // first in program order.
362 return check->GetLatency() < candidate->GetLatency() ? check : candidate;
363}
364
365void HScheduler::Schedule(HGraph* graph) {
366 for (HBasicBlock* block : graph->GetReversePostOrder()) {
367 if (IsSchedulable(block)) {
368 Schedule(block);
369 }
370 }
371}
372
// Schedule a single basic block: build the scheduling graph, then repeatedly
// pick the highest-priority ready node and move its instruction into place.
void HScheduler::Schedule(HBasicBlock* block) {
  ArenaVector<SchedulingNode*> scheduling_nodes(arena_->Adapter(kArenaAllocScheduler));

  // Build the scheduling graph.
  // NOTE(review): the block is walked backwards — presumably so that each
  // instruction's successors are already present when its node is added;
  // confirm against SchedulingGraph::AddNode.
  scheduling_graph_.Clear();
  for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    HInstruction* instruction = it.Current();
    SchedulingNode* node = scheduling_graph_.AddNode(instruction, IsSchedulingBarrier(instruction));
    CalculateLatency(node);
    scheduling_nodes.push_back(node);
  }

  // Nothing can be reordered in a block with at most one node.
  if (scheduling_graph_.Size() <= 1) {
    scheduling_graph_.Clear();
    return;
  }

  // Scheduled instructions are placed just after `cursor_`, which retreats as
  // scheduling proceeds (see Schedule(HInstruction*)).
  cursor_ = block->GetLastInstruction();

  // Find the initial candidates for scheduling: nodes with no unscheduled
  // successors, i.e. nothing in the block depends on them being later.
  candidates_.clear();
  for (SchedulingNode* node : scheduling_nodes) {
    if (!node->HasUnscheduledSuccessors()) {
      node->MaybeUpdateCriticalPath(node->GetLatency());
      candidates_.push_back(node);
    }
  }

  ArenaVector<SchedulingNode*> initial_candidates(arena_->Adapter(kArenaAllocScheduler));
  if (kDumpDotSchedulingGraphs) {
    // Remember the list of initial candidates for debug output purposes.
    initial_candidates.assign(candidates_.begin(), candidates_.end());
  }

  // Schedule all nodes. Scheduling a node may add its now-ready predecessors
  // to `candidates_`, so the loop runs until the graph is exhausted.
  while (!candidates_.empty()) {
    Schedule(selector_->PopHighestPriorityNode(&candidates_, scheduling_graph_));
  }

  if (kDumpDotSchedulingGraphs) {
    // Dump the graph in `dot` format.
    HGraph* graph = block->GetGraph();
    std::stringstream description;
    description << graph->GetDexFile().PrettyMethod(graph->GetMethodIdx())
        << " B" << block->GetBlockId();
    scheduling_graph_.DumpAsDotGraph(description.str(), initial_candidates);
  }
}
421
422void HScheduler::Schedule(SchedulingNode* scheduling_node) {
423 // Check whether any of the node's predecessors will be valid candidates after
424 // this node is scheduled.
425 uint32_t path_to_node = scheduling_node->GetCriticalPath();
426 for (SchedulingNode* predecessor : scheduling_node->GetDataPredecessors()) {
427 predecessor->MaybeUpdateCriticalPath(
428 path_to_node + predecessor->GetInternalLatency() + predecessor->GetLatency());
429 predecessor->DecrementNumberOfUnscheduledSuccessors();
430 if (!predecessor->HasUnscheduledSuccessors()) {
431 candidates_.push_back(predecessor);
432 }
433 }
434 for (SchedulingNode* predecessor : scheduling_node->GetOtherPredecessors()) {
435 // Do not update the critical path.
436 // The 'other' (so 'non-data') dependencies (usually) do not represent a
437 // 'material' dependency of nodes on others. They exist for program
438 // correctness. So we do not use them to compute the critical path.
439 predecessor->DecrementNumberOfUnscheduledSuccessors();
440 if (!predecessor->HasUnscheduledSuccessors()) {
441 candidates_.push_back(predecessor);
442 }
443 }
444
445 Schedule(scheduling_node->GetInstruction());
446}
447
448// Move an instruction after cursor instruction inside one basic block.
449static void MoveAfterInBlock(HInstruction* instruction, HInstruction* cursor) {
450 DCHECK_EQ(instruction->GetBlock(), cursor->GetBlock());
451 DCHECK_NE(cursor, cursor->GetBlock()->GetLastInstruction());
452 DCHECK(!instruction->IsControlFlow());
453 DCHECK(!cursor->IsControlFlow());
454 instruction->MoveBefore(cursor->GetNext(), /* do_checks */ false);
455}
456
457void HScheduler::Schedule(HInstruction* instruction) {
458 if (instruction == cursor_) {
459 cursor_ = cursor_->GetPrevious();
460 } else {
461 MoveAfterInBlock(instruction, cursor_);
462 }
463}
464
// Returns whether `instruction` may be freely moved by the scheduler.
// Conservative whitelist: anything not explicitly recognized is rejected.
bool HScheduler::IsSchedulable(const HInstruction* instruction) const {
  // We want to avoid exhaustively listing all instructions, so we first check
  // for instruction categories that we know are safe.
  if (instruction->IsControlFlow() ||
      instruction->IsConstant()) {
    return true;
  }
  // Currently all unary and binary operations are safe to schedule, so avoid
  // checking for each of them individually.
  // Since nothing prevents a new scheduling-unsafe HInstruction to subclass
  // HUnaryOperation (or HBinaryOperation), check in debug mode that we have
  // the exhaustive lists here.
  if (instruction->IsUnaryOperation()) {
    DCHECK(instruction->IsBooleanNot() ||
           instruction->IsNot() ||
           instruction->IsNeg()) << "unexpected instruction " << instruction->DebugName();
    return true;
  }
  if (instruction->IsBinaryOperation()) {
    DCHECK(instruction->IsAdd() ||
           instruction->IsAnd() ||
           instruction->IsCompare() ||
           instruction->IsCondition() ||
           instruction->IsDiv() ||
           instruction->IsMul() ||
           instruction->IsOr() ||
           instruction->IsRem() ||
           instruction->IsRor() ||
           instruction->IsShl() ||
           instruction->IsShr() ||
           instruction->IsSub() ||
           instruction->IsUShr() ||
           instruction->IsXor()) << "unexpected instruction " << instruction->DebugName();
    return true;
  }
  // The scheduler should not see any of these.
  DCHECK(!instruction->IsParallelMove()) << "unexpected instruction " << instruction->DebugName();
  // List of instructions explicitly excluded:
  //    HClearException
  //    HClinitCheck
  //    HDeoptimize
  //    HLoadClass
  //    HLoadException
  //    HMemoryBarrier
  //    HMonitorOperation
  //    HNativeDebugInfo
  //    HThrow
  //    HTryBoundary
  // TODO: Some of the instructions above may be safe to schedule (maybe as
  // scheduling barriers).
  // Exhaustive whitelist of the remaining schedulable instruction kinds.
  return instruction->IsArrayGet() ||
      instruction->IsArraySet() ||
      instruction->IsArrayLength() ||
      instruction->IsBoundType() ||
      instruction->IsBoundsCheck() ||
      instruction->IsCheckCast() ||
      instruction->IsClassTableGet() ||
      instruction->IsCurrentMethod() ||
      instruction->IsDivZeroCheck() ||
      instruction->IsInstanceFieldGet() ||
      instruction->IsInstanceFieldSet() ||
      instruction->IsInstanceOf() ||
      instruction->IsInvokeInterface() ||
      instruction->IsInvokeStaticOrDirect() ||
      instruction->IsInvokeUnresolved() ||
      instruction->IsInvokeVirtual() ||
      instruction->IsLoadString() ||
      instruction->IsNewArray() ||
      instruction->IsNewInstance() ||
      instruction->IsNullCheck() ||
      instruction->IsPackedSwitch() ||
      instruction->IsParameterValue() ||
      instruction->IsPhi() ||
      instruction->IsReturn() ||
      instruction->IsReturnVoid() ||
      instruction->IsSelect() ||
      instruction->IsStaticFieldGet() ||
      instruction->IsStaticFieldSet() ||
      instruction->IsSuspendCheck() ||
      instruction->IsTypeConversion() ||
      instruction->IsUnresolvedInstanceFieldGet() ||
      instruction->IsUnresolvedInstanceFieldSet() ||
      instruction->IsUnresolvedStaticFieldGet() ||
      instruction->IsUnresolvedStaticFieldSet();
}
550
// Returns whether `block` as a whole may be scheduled: it must match the
// loop-only filter, be outside try/catch, and contain only schedulable
// instructions.
bool HScheduler::IsSchedulable(const HBasicBlock* block) const {
  // We may be only interested in loop blocks.
  if (only_optimize_loop_blocks_ && !block->IsInLoop()) {
    return false;
  }
  if (block->GetTryCatchInformation() != nullptr) {
    // Do not schedule blocks that are part of try-catch.
    // Because the scheduler cannot see if the catch block has assumptions on the
    // instruction order in the try block. In the following example, if we enable
    // the scheduler for the try block, MultiplyAccumulate may be scheduled before
    // DivZeroCheck, which can result in an incorrect value in the catch block.
    //   try {
    //     a = a/b;    // DivZeroCheck
    //                 // Div
    //     c = c*d+e;  // MultiplyAccumulate
    //   } catch {System.out.print(c); }
    return false;
  }
  // Check whether all instructions in this block are schedulable.
  for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
    if (!IsSchedulable(it.Current())) {
      return false;
    }
  }
  return true;
}
577
578bool HScheduler::IsSchedulingBarrier(const HInstruction* instr) const {
579 return instr->IsControlFlow() ||
580 // Don't break calling convention.
581 instr->IsParameterValue() ||
582 // Code generation of goto relies on SuspendCheck's position.
583 instr->IsSuspendCheck();
584}
585
// Entry point of the scheduling pass: pick a node selector (random or
// critical-path) and dispatch to the backend-specific scheduler for the
// target instruction set. A no-op for unsupported instruction sets.
void HInstructionScheduling::Run(bool only_optimize_loop_blocks,
                                 bool schedule_randomly) {
#if defined(ART_ENABLE_CODEGEN_arm64) || defined(ART_ENABLE_CODEGEN_arm)
  // Phase-local allocator that allocates scheduler internal data structures like
  // scheduling nodes, internal nodes map, dependencies, etc.
  ArenaAllocator arena_allocator(graph_->GetArena()->GetArenaPool());
  CriticalPathSchedulingNodeSelector critical_path_selector;
  RandomSchedulingNodeSelector random_selector;
  SchedulingNodeSelector* selector = schedule_randomly
      ? static_cast<SchedulingNodeSelector*>(&random_selector)
      : static_cast<SchedulingNodeSelector*>(&critical_path_selector);
#else
  // Avoid compilation error when compiling for unsupported instruction set.
  UNUSED(only_optimize_loop_blocks);
  UNUSED(schedule_randomly);
#endif
  switch (instruction_set_) {
#ifdef ART_ENABLE_CODEGEN_arm64
    case kArm64: {
      arm64::HSchedulerARM64 scheduler(&arena_allocator, selector);
      scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
      scheduler.Schedule(graph_);
      break;
    }
#endif
#if defined(ART_ENABLE_CODEGEN_arm)
    case kThumb2:
    case kArm: {
      // ARM uses a dedicated latency visitor that needs the codegen.
      arm::SchedulingLatencyVisitorARM arm_latency_visitor(codegen_);
      arm::HSchedulerARM scheduler(&arena_allocator, selector, &arm_latency_visitor);
      scheduler.SetOnlyOptimizeLoopBlocks(only_optimize_loop_blocks);
      scheduler.Schedule(graph_);
      break;
    }
#endif
    default:
      // Scheduling is not implemented for other instruction sets.
      break;
  }
}
625
626} // namespace art