/*
 * Copyright (C) 2023 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "berberis/backend/common/machine_ir_opt.h"

#include <utility>  // std::swap

#include "berberis/backend/common/machine_ir.h"
#include "berberis/base/arena_vector.h"
#include "berberis/base/logging.h"

namespace berberis {

// Remove PSEUDO_COPY instructions whose source and destination
// operands are identical.
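//
// For example (illustrative pseudo-IR, not the exact printed form):
//
//   PSEUDO_COPY v5, v5   <- removed: source == destination
//   PSEUDO_COPY v5, v6   <- kept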
void RemoveNopPseudoCopy(MachineIR* machine_ir) {
  for (auto* machine_bb : machine_ir->bb_list()) {
    machine_bb->insn_list().remove_if([](MachineInsn* machine_insn) {
      return machine_insn->opcode() == PseudoCopy::kOpcode &&
             machine_insn->RegAt(0) == machine_insn->RegAt(1);
    });
  }
}

// Remove forwarder blocks, i.e. basic blocks that contain nothing
// but an unconditional jump. Jumps to a forwarder block are
// redirected to its final destination.
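//
// For example, in a hypothetical three-block region:
//
//   before:  BB0: ...; jump BB1        after:  BB0: ...; jump BB2
//            BB1: jump BB2                     BB2: ...
//            BB2: ...
//
// BB1 is a forwarder block: the jump in BB0 is redirected to BB2,
// and BB1 itself is removed.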
void RemoveForwarderBlocks(MachineIR* machine_ir) {
  // A map from a MachineBasicBlock ID to MachineBasicBlock* that
  // describes where forwarder blocks take us. Specifically, if basic
  // block BB is a forwarder block, then forwarder_map[BB] points to
  // the MachineBasicBlock that the forwarder block jumps to.
  // Otherwise, forwarder_map[BB] is nullptr.
  ArenaVector<const MachineBasicBlock*> forwarder_map(
      machine_ir->NumBasicBlocks(), nullptr, machine_ir->arena());
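
  // In the three-block example above, BB1 is the only forwarder
  // block, so once populated the map would read
  // {BB0: nullptr, BB1: BB2, BB2: nullptr} (assuming block IDs 0-2;
  // the IDs are illustrative).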

  // Identify forwarder blocks. We store in forwarder_map a mapping
  // from each forwarder block to its destination block.
  for (const auto* machine_bb : machine_ir->bb_list()) {
    if (machine_bb->insn_list().size() != 1) continue;

    const MachineInsn* last_insn = machine_bb->insn_list().back();
    if (last_insn->opcode() == PseudoBranch::kOpcode) {
      const PseudoBranch* branch_insn = static_cast<const PseudoBranch*>(last_insn);
      forwarder_map[machine_bb->id()] = branch_insn->then_bb();
    }
  }

  // We might have a forwarder block that jumps to another forwarder
  // block. Go through the entire map and determine the final
  // destination for each entry.
  //
  // Note that we *must* do this for correctness. If we did not, a
  // jump to a forwarder block that in turn jumps to another
  // forwarder block would get rewritten as a jump to a deleted basic
  // block, which would be a disaster.
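  //
  // For example, if BB1 forwards to BB2 and BB2 forwards to BB3,
  // the map initially reads {BB1: BB2, BB2: BB3}. After this pass
  // it reads {BB1: BB3, BB2: BB3}, so a branch targeting BB1 is
  // redirected straight to BB3 rather than to the soon-to-be-deleted
  // BB2.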
  for (size_t i = 0; i < forwarder_map.size(); i++) {
    auto* final_dest = forwarder_map[i];
    if (final_dest == nullptr) {
      continue;
    }

    unsigned steps = 0;
    while (auto* bb = forwarder_map[final_dest->id()]) {
      final_dest = bb;

      // Assert that we don't have a loop composed of forwarder
      // blocks only.
      ++steps;
      CHECK_LT(steps, forwarder_map.size());
    }
    forwarder_map[i] = final_dest;
  }

  // Redirect jumps to forwarder blocks.
  for (const auto* machine_bb : machine_ir->bb_list()) {
    const MachineInsnList& insn_list = machine_bb->insn_list();
    if (insn_list.empty()) {
      continue;
    }

    MachineInsn* last_insn = insn_list.back();
    if (last_insn->opcode() == PseudoBranch::kOpcode) {
      PseudoBranch* branch_insn = static_cast<PseudoBranch*>(last_insn);
      if (auto* new_dest = forwarder_map[branch_insn->then_bb()->id()]) {
        branch_insn->set_then_bb(new_dest);
      }
    } else if (last_insn->opcode() == PseudoCondBranch::kOpcode) {
      PseudoCondBranch* branch_insn = static_cast<PseudoCondBranch*>(last_insn);
      if (auto* new_then_bb = forwarder_map[branch_insn->then_bb()->id()]) {
        branch_insn->set_then_bb(new_then_bb);
      }
      if (auto* new_else_bb = forwarder_map[branch_insn->else_bb()->id()]) {
        branch_insn->set_else_bb(new_else_bb);
      }
    }
  }

  // Don't remove the first basic block even if it is a forwarder
  // block. It is the entry point into the region, so removing it
  // would make the next block in the chain the new entry point; if
  // the forwarder jumps to any block other than the second one, that
  // would change the semantics of the region.
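  //
  // For example, if entry block BB0 contains only a jump to BB7,
  // deleting BB0 would make the next block in the list (say, BB1)
  // the entry point, even though execution must start at BB7.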
  forwarder_map[machine_ir->bb_list().front()->id()] = nullptr;

  // Remove forwarder blocks.
  machine_ir->bb_list().remove_if([&forwarder_map](const MachineBasicBlock* machine_bb) {
    return forwarder_map[machine_bb->id()] != nullptr;
  });
}

// Reorder basic blocks so that recovery basic blocks come at the end
// of the basic block chain.
//
// Moving recovery basic blocks to the end of the chain solves two
// problems at once: it improves cache locality for the hot path and
// avoids unconditional jumps around recovery basic blocks. In the
// future, we might generalize this function to handle other kinds of
// cold blocks.
//
// Moving recovery blocks to the end doesn't break the
// MachineIR::BasicBlocksOrder::kReversePostOrder properties we are
// interested in, so we intentionally keep that order valid if it was
// set.
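//
// For example (N = normal block, R = recovery block), the chain
//
//   N0, R0, N1, R1, N2
//
// becomes
//
//   N0, N1, N2, R1, R0
//
// Note that the relative order of the normal blocks is preserved,
// while the relative order of the recovery blocks is not.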
void MoveColdBlocksToEnd(MachineIR* machine_ir) {
  // Since the first basic block is the region entry, we must keep it
  // in place. Fortunately, a recovery block cannot be the first one,
  // since it must follow a faulting instruction.
  CHECK(!machine_ir->bb_list().front()->is_recovery());

  // We are going to partition bb_list() into normal and recovery
  // basic blocks. We preserve the order of normal basic blocks so
  // that they are more likely to fall through (that is, to avoid
  // unconditional jumps around recovery basic blocks). We do not
  // preserve the order of recovery basic blocks.
  //
  // We've chosen not to use std::stable_partition because it may
  // allocate a temporary buffer in the regular heap.
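  //
  // The single pass below walks the list with a write cursor
  // (normal_it) marking the end of the normal-block prefix: each
  // normal block encountered is swapped into the cursor position,
  // which pushes recovery blocks toward the back. Tracing the
  // N0, R0, N1, R1, N2 example above step by step:
  //
  //   visit N0: swap with itself   -> N0, R0, N1, R1, N2
  //   visit R0: skip               -> N0, R0, N1, R1, N2
  //   visit N1: swap with R0       -> N0, N1, R0, R1, N2
  //   visit R1: skip               -> N0, N1, R0, R1, N2
  //   visit N2: swap with R0       -> N0, N1, N2, R1, R0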
  auto* bb_list = &(machine_ir->bb_list());
  auto normal_it = bb_list->begin();
  for (auto*& bb : *bb_list) {
    if (!bb->is_recovery()) {
      std::swap(*normal_it++, bb);  // Swapping an element with itself is harmless.
    }
  }
}

}  // namespace berberis