author    Jonas Haag <jonas@lophus.org>       2015-12-26 19:10:51 +0100
committer Philipp Serrer <philipp@serrer.de>  2018-01-18 18:05:11 +0100
commit    d9f5c8bb9bb217aaed83d2ebb455dcd1f955ab04 (patch)
tree      92c67d3d87b4d79bcb2b8d1d31e980b6e87fcc96
parent    78974f9cb8830ef2fb64262de327e8160c482881 (diff)
Refactor fallthrough checks in ia32 and amd64 emitters
Unify the code for the "can fallthrough?" checks, which is implemented using irn links. The refactored code is now almost identical in the two backends; the remaining differences are that A) amd64 doesn't have SwitchJmp and B) the ia32 backend uses *predecessor* block links whereas the amd64 backend uses *successor* block links.
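For orientation, here is a minimal sketch of the link-based check the message refers to. The block_t type and its prev_in_schedule field are simplified stand-ins invented for this sketch; the real backends keep the neighbouring block of the emission schedule in an irn link and query it through helpers such as be_emit_get_prev_block (see the amd64 hunk below).

#include <stdbool.h>

/* Hypothetical stand-in for an ir_node block; libfirm stores the
 * emission-order neighbour in an irn link rather than a field. */
typedef struct block_t {
	const struct block_t *prev_in_schedule;
} block_t;

/* A branch from `block` to `target` can be omitted iff `target` is
 * emitted directly after `block`, i.e. control simply falls through.
 * Usage: emit "jmp target" only when this returns false. */
static bool fallthrough_possible(const block_t *block, const block_t *target)
{
	return target->prev_in_schedule == block;
}

The is_fallthrough helper introduced in the diff below is a thin wrapper that derives both arguments from a control-flow node, via get_nodes_block and be_emit_get_cfop_target.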
-rw-r--r--  ir/be/amd64/amd64_emitter.c | 22
-rw-r--r--  ir/be/ia32/ia32_emitter.c   |  7
2 files changed, 14 insertions(+), 15 deletions(-)
diff --git a/ir/be/amd64/amd64_emitter.c b/ir/be/amd64/amd64_emitter.c
index 0be7e20..181fec7 100644
--- a/ir/be/amd64/amd64_emitter.c
+++ b/ir/be/amd64/amd64_emitter.c
@@ -39,6 +39,13 @@ static bool fallthrough_possible(const ir_node *block, const ir_node *target)
 	return be_emit_get_prev_block(target) == block;
 }
 
+static bool is_fallthrough(const ir_node *node)
+{
+	const ir_node *const source_block = get_nodes_block(node);
+	const ir_node *const target_block = be_emit_get_cfop_target(node);
+	return fallthrough_possible(source_block, target_block);
+}
+
 static char get_gp_size_suffix(x86_insn_size_t const size)
 {
 	switch (size) {
@@ -603,18 +610,13 @@ static void emit_amd64_call(const ir_node* node)
 
 	if (is_cfop(node)) {
 		/* If the call throws we have to add a jump to its X_regular block. */
-		const ir_node* const block          = get_nodes_block(node);
-		const ir_node* const x_regular_proj = get_Proj_for_pn(node, node->op->pn_x_regular);
+		const ir_node* const x_regular_proj = get_Proj_for_pn(node, node->op->pn_x_regular);
 		if (x_regular_proj == NULL) {
 			/* Call always throws and/or never returns. */
-		} else {
-			const ir_node* const x_regular_block = be_emit_get_cfop_target(x_regular_proj);
-			assert(x_regular_block != NULL);
-			if (!fallthrough_possible(block, x_regular_block)) {
-				amd64_emitf(x_regular_proj, "jmp %L");
-			} else if (be_options.verbose_asm) {
-				amd64_emitf(x_regular_proj, "/* fallthrough to %L */");
-			}
+		} else if (!is_fallthrough(x_regular_proj)) {
+			amd64_emitf(x_regular_proj, "jmp %L");
+		} else if (be_options.verbose_asm) {
+			amd64_emitf(x_regular_proj, "/* fallthrough to %L */");
 		}
 	}
 }
diff --git a/ir/be/ia32/ia32_emitter.c b/ir/be/ia32/ia32_emitter.c
index 580b5e7..a7052c2 100644
--- a/ir/be/ia32/ia32_emitter.c
+++ b/ir/be/ia32/ia32_emitter.c
@@ -1179,14 +1179,11 @@ static void emit_ia32_Call(const ir_node *node)
 
 	if (is_cfop(node)) {
 		/* If the call throws we have to add a jump to its X_regular block. */
-		const ir_node* const block          = get_nodes_block(node);
-		const ir_node* const x_regular_proj = get_Proj_for_pn(node, node->op->pn_x_regular);
+		const ir_node* const x_regular_proj = get_Proj_for_pn(node, node->op->pn_x_regular);
 		if (x_regular_proj == NULL) {
 			/* Call always throws and/or never returns. */
 		} else {
-			const ir_node* const x_regular_block = be_emit_get_cfop_target(x_regular_proj);
-			assert(x_regular_block != NULL);
-			if (fallthrough_possible(block, x_regular_block)) {
+			if (is_fallthrough(x_regular_proj)) {
 				if (be_options.verbose_asm)
 					ia32_emitf(x_regular_proj, "/* fallthrough to %L */");
 			} else {