path: root/ir/be/bespill.c
author    Matthias Braun <matze@braunis.de>    2014-07-07 16:03:07 +0200
committer Matthias Braun <matze@braunis.de>    2014-07-07 16:06:29 +0200
commit    9d0b843b08f4ef4360f682427b51bc0cccb36092 (patch)
tree      38fc0ad411cf36e1ecde668d40400a10a21def82 /ir/be/bespill.c
parent    771728a99730d7fbb660d4108dd818792f52b81f (diff)
be: change pre spill prepare phase to work on all register classes at once
- Only iterating over the graph once should be slightly faster.
- We don't need to insert it into the middle of the register allocation logic but can perform it once beforehand.
- We can gather statistics on the prepared graph before spilling/regalloc has happened.
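
To make the new shape of the phase concrete, a minimal sketch follows, assuming only the libfirm backend API already visible in the diff below (foreach_irn_in, sched_foreach, irg_block_walk_graph): a single scheduled walk that takes the register class from each input's own requirement instead of being re-run once per class. The names prepare_constr_insn_all_classes and be_pre_spill_prepare_constr_all are hypothetical; the actual replacement lives elsewhere in the backend after this commit.

/* Hedged sketch only: one walk over the graph handling every register
 * class at once.  Helper names are hypothetical; the API calls are the
 * same ones used by the deleted per-class code in the diff below. */
static void prepare_constr_insn_all_classes(ir_node *node)
{
	foreach_irn_in(node, i, op) {
		const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
		/* No per-class filter any more: the class comes from the
		 * input's own requirement, so one pass covers all classes. */
		const arch_register_class_t *cls = req->cls;
		(void)cls;
		(void)op;
		/* ... same precolored/limited checks as before, inserting
		 * be_new_Copy() nodes where a constraint cannot be met ... */
	}
}

static void prepare_walker(ir_node *block, void *data)
{
	(void)data;
	sched_foreach(block, node) {
		prepare_constr_insn_all_classes(node);
	}
}

void be_pre_spill_prepare_constr_all(ir_graph *irg)
{
	/* One graph walk in total, run once before spilling/regalloc. */
	irg_block_walk_graph(irg, prepare_walker, NULL, NULL);
}

Per node the work is the same as before; the saving is that irg_block_walk_graph runs once rather than once per register class.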
Diffstat (limited to 'ir/be/bespill.c')
-rw-r--r-- ir/be/bespill.c | 163
1 file changed, 0 insertions(+), 163 deletions(-)
diff --git a/ir/be/bespill.c b/ir/be/bespill.c
index 88ee31e..0f43675 100644
--- a/ir/be/bespill.c
+++ b/ir/be/bespill.c
@@ -30,167 +30,6 @@
#include "lc_opts.h"
#include "lc_opts_enum.h"
-DEBUG_ONLY(static firm_dbg_module_t *dbg = NULL;)
-
-typedef struct be_pre_spill_env_t {
- ir_graph *irg;
- const arch_register_class_t *cls;
-} be_pre_spill_env_t;
-
-static void prepare_constr_insn(be_pre_spill_env_t *env, ir_node *node)
-{
- const arch_register_class_t *cls = env->cls;
- ir_node *block = get_nodes_block(node);
- const ir_graph *irg = env->irg;
- be_irg_t *birg = be_birg_from_irg(irg);
- be_lv_t *lv = be_get_irg_liveness(irg);
- unsigned *def_constr = NULL;
-
- /* Insert a copy for constraint inputs attached to a value which can't
- * fulfill the constraint
- * (typical example: stack pointer as input to copyb)
- * TODO: This really just checks precolored registers at the moment and
- * ignores the general case of not matching in/out constraints */
- foreach_irn_in(node, i, op) {
- const arch_register_req_t *req = arch_get_irn_register_req_in(node, i);
- if (req->cls != cls)
- continue;
-
- const arch_register_t *reg = arch_get_irn_register(op);
- if (reg == NULL)
- continue;
-
- /* Precolored with an ignore register (which is not virtual). */
- if (reg->type & arch_register_type_virtual ||
- rbitset_is_set(birg->allocatable_regs, reg->global_index))
- continue;
-
- if (!arch_register_req_is(req, limited))
- continue;
- if (rbitset_is_set(req->limited, reg->index))
- continue;
-
- ir_node *copy = be_new_Copy(block, op);
- stat_ev_int("constr_copy", 1);
- sched_add_before(node, copy);
- set_irn_n(node, i, copy);
- DBG((dbg, LEVEL_3, "inserting ignore arg copy %+F for %+F pos %d\n",
- copy, node, i));
- }
-
- /* insert copies for nodes that occur constrained more than once. */
- int const arity = get_irn_arity(node);
- be_foreach_use(node, cls, req, in, in_req_,
- if (!arch_register_req_is(req, limited))
- continue;
-
- for (int i2 = i_ + 1; i2 < arity; ++i2) {
- const arch_register_req_t *req2
- = arch_get_irn_register_req_in(node, i2);
- if (req2->cls != cls)
- continue;
- if (!arch_register_req_is(req2, limited))
- continue;
-
- ir_node *in2 = get_irn_n(node, i2);
- if (in2 != in)
- continue;
-
- /* if the constraint is the same, no copy is necessary
- * TODO generalise unequal but overlapping constraints */
- if (rbitsets_equal(req->limited, req2->limited, cls->n_regs))
- continue;
-
- ir_node *copy = be_new_Copy(block, in);
- stat_ev_int("constr_copy", 1);
-
- sched_add_before(node, copy);
- set_irn_n(node, i2, copy);
- DBG((dbg, LEVEL_3,
- "inserting multiple constr copy %+F for %+F pos %d\n",
- copy, node, i2));
- }
- );
-
- /* collect all registers occurring in out constraints. */
- be_foreach_definition(node, cls, def, req,
- (void)def;
- if (!arch_register_req_is(req, limited))
- continue;
- if (def_constr == NULL) {
- def_constr = rbitset_alloca(cls->n_regs);
- }
- rbitset_or(def_constr, req->limited, cls->n_regs);
- );
-
- /* no output constraints => we're good */
- if (def_constr == NULL) {
- return;
- }
-
- /*
- * insert copies for all constrained arguments living through the node
- * and being constrained to a register which also occurs in out constraints.
- */
- unsigned *const tmp = rbitset_alloca(cls->n_regs);
- be_foreach_use(node, cls, req, in, in_req_,
- /* Check, if
- * 1) the operand is constrained.
- * 2) lives through the node.
- * 3) is constrained to a register occurring in out constraints.
- */
- if (!arch_register_req_is(req, limited))
- continue;
- if (!be_values_interfere(lv, node, in))
- continue;
-
- rbitset_copy(tmp, req->limited, cls->n_regs);
- rbitset_and(tmp, def_constr, cls->n_regs);
-
- if (rbitset_is_empty(tmp, cls->n_regs))
- continue;
-
- /*
- * only create the copy if the operand is no copy.
- * this is necessary since the assure constraints phase inserts
- * Copies and Keeps for operands which must be different from the
- * results. Additional copies here would destroy this.
- */
- if (be_is_Copy(in))
- continue;
-
- ir_node *copy = be_new_Copy(block, in);
- sched_add_before(node, copy);
- set_irn_n(node, i_, copy);
- DBG((dbg, LEVEL_3, "inserting constr copy %+F for %+F pos %d\n",
- copy, node, i_));
- be_liveness_update(lv, in);
- );
-}
-
-static void pre_spill_prepare_constr_walker(ir_node *block, void *data)
-{
- be_pre_spill_env_t *env = (be_pre_spill_env_t*)data;
- sched_foreach(block, node) {
- prepare_constr_insn(env, node);
- }
-}
-
-void be_pre_spill_prepare_constr(ir_graph *irg,
- const arch_register_class_t *cls)
-{
- be_pre_spill_env_t env;
- memset(&env, 0, sizeof(env));
- env.irg = irg;
- env.cls = cls;
-
- be_assure_live_sets(irg);
-
- irg_block_walk_graph(irg, pre_spill_prepare_constr_walker, NULL, &env);
-}
-
-
-
int be_coalesce_spill_slots = 1;
int be_do_remats = 1;
@@ -225,6 +64,4 @@ void be_init_spilloptions(void)
lc_opt_add_table(spill_grp, be_spill_options);
be_add_module_list_opt(be_grp, "spiller", "spill algorithm",
&spillers, (void**) &selected_spiller);
-
- FIRM_DBG_REGISTER(dbg, "firm.be.spillprepare");
}
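
For the caller's side of the change, a hedged before/after sketch: the loop mirrors how the deleted per-class entry point be_pre_spill_prepare_constr(irg, cls) had to be driven once per register class, while n_register_classes, register_classes, and the combined entry point's name are assumptions for illustration.

/* Before: invoked once per register class, interleaved with register
 * allocation (signature from the deleted code above; the class
 * iteration is schematic and assumed). */
for (int c = 0; c < n_register_classes; ++c)
	be_pre_spill_prepare_constr(irg, &register_classes[c]);

/* After: one combined preparation pass, run a single time before
 * spilling/regalloc (entry-point name assumed). */
be_pre_spill_prepare_constr_all(irg);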