Diffstat (limited to 'ir/be/bespillbelady.c')
-rw-r--r--  ir/be/bespillbelady.c  35
1 file changed, 21 insertions(+), 14 deletions(-)
diff --git a/ir/be/bespillbelady.c b/ir/be/bespillbelady.c
index 709b079..698a6e6 100644
--- a/ir/be/bespillbelady.c
+++ b/ir/be/bespillbelady.c
@@ -75,12 +75,15 @@ static workset_t *temp_workset;
static bool move_spills = true;
static bool respectloopdepth = true;
static bool improve_known_preds = true;
+static bool allow_giving_up = true;
+static bool consider_loop_limited = true;
+static bool consider_limited_workset_entries = true;
/* factor to weight the different costs of reloading/rematerializing a node
(see bespill.h be_get_reload_costs_no_weight) */
static int remat_bonus = 10;
-static bool has_given_up = false;
-static bool can_give_up = true;
+static bool has_given_up;
+static bool can_give_up;
static const lc_opt_table_entry_t options[] = {
LC_OPT_ENT_BOOL ("movespills", "try to move spills out of loops", &move_spills),
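The hunk is cut off before the rest of the options table, so whether the three new flags become command-line options is not visible here. If they do, the entries presumably mirror the LC_OPT_ENT_BOOL pattern above; a minimal sketch, where the option names and help strings are assumptions rather than taken from the patch:

    /* Hypothetical entries for the new toggles, following the
       LC_OPT_ENT_BOOL(name, help, &variable) pattern shown above. */
    LC_OPT_ENT_BOOL("allowgivingup",  "allow giving up on the first spill attempt",                   &allow_giving_up),
    LC_OPT_ENT_BOOL("looplimited",    "consider loop-limited registers when sorting delayed values",  &consider_loop_limited),
    LC_OPT_ENT_BOOL("limitedworkset", "consider limited register requirements when sorting worksets", &consider_limited_workset_entries),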
@@ -212,7 +215,7 @@ static int loc_compare(const void *a, const void *b)
if (pt > qt)
return 1;
- // We are sorting something else and NOT the workset for displacement
+ // Either we are sorting something other than the workset for displacement, or the limited checks were disabled
if (bitset_is_empty(limited_registers)) {
// Tie-breaker to stay deterministic
return get_irn_node_nr(p->node) - get_irn_node_nr(q->node);
@@ -222,7 +225,6 @@ static int loc_compare(const void *a, const void *b)
const arch_register_req_t *pReq = arch_get_irn_register_req(p->node);
const arch_register_req_t *qReq = arch_get_irn_register_req(q->node);
// Unlimited registers are smaller than limited as we'd like to keep them around if possible
- // TODO: Is this a wise decision?
if (!pReq->limited && qReq->limited) {
return -1;
} else if (pReq->limited && !qReq->limited) {
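Taken together, the two loc_compare hunks order entries by next use and, on ties, put unrestricted values before register-restricted ones, so the displacement logic evicts restricted values first. A self-contained toy illustrating that ordering (a simplified stand-in, not the real loc_compare; the struct and names are invented for the example):

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy workset entry: just the fields the comparison needs. */
    typedef struct {
        int         next_use; /* smaller = needed sooner               */
        int         limited;  /* nonzero = tied to a specific register */
        const char *name;
    } loc_example_t;

    /* Simplified ordering in the spirit of loc_compare: next use first,
       then unlimited before limited, so limited values sort to the large
       end of the array and are displaced first. */
    static int loc_example_compare(const void *a, const void *b)
    {
        const loc_example_t *p = a, *q = b;
        if (p->next_use != q->next_use)
            return p->next_use < q->next_use ? -1 : 1;
        if (!p->limited && q->limited)
            return -1;
        if (p->limited && !q->limited)
            return 1;
        return 0;
    }

    int main(void)
    {
        loc_example_t vals[] = {
            { 3, 1, "c (limited)" },
            { 3, 0, "b" },
            { 1, 0, "a" },
        };
        qsort(vals, 3, sizeof(vals[0]), loc_example_compare);
        for (int i = 0; i < 3; ++i)
            printf("%s\n", vals[i].name); /* prints: a, b, c (limited) */
        return 0;
    }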
@@ -253,6 +255,10 @@ static int loc_compare(const void *a, const void *b)
static void workset_sort(workset_t * new_values, workset_t *workset)
{
+ if (!consider_limited_workset_entries) {
+ QSORT(workset->vals, workset->len, loc_compare);
+ return;
+ }
for (int i = 0; i < new_values->len; ++i) {
const arch_register_req_t *req = arch_get_irn_register_req(new_values->vals[i].node);
if (req->limited != NULL) {
@@ -605,15 +611,16 @@ static void decide_start_workset(ir_node *const block)
DB((dbg, DBG_START, "Loop pressure %d, taking %d delayed vals\n",
pressure, free_slots));
if (free_slots > 0) {
- rbitset_copy_to_bitset(be_get_loop_limited_registers(loop_ana, cls, loop), limited_registers);
- // Sort by next use and whether we already have a value limited to the same register.
- // The largest value will be the one that is either not used in future or, if there are multiple, one
- // limited to a register needed in the loop
- bitset_foreach(limited_registers, i) {
- DB((dbg, DBG_START, " limited %s\n", arch_register_for_index(cls, i)->name));
+ if (consider_loop_limited) {
+ // Sort by next use and whether we already have a value limited to the same register.
+ // The largest value will be the one that is either not used in future or, if there are multiple, one
+ // limited to a register needed in the loop
+ rbitset_copy_to_bitset(be_get_loop_limited_registers(loop_ana, cls, loop), limited_registers);
+ QSORT_ARR(delayed, loc_compare);
+ bitset_clear_all(limited_registers);
+ } else {
+ QSORT_ARR(delayed, loc_compare);
}
- QSORT_ARR(delayed, loc_compare);
- bitset_clear_all(limited_registers);
for (size_t i = 0; i < ARR_LEN(delayed) && free_slots > 0; ++i) {
loc_t *loc = &delayed[i];
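Note how limited_registers brackets the sort: it is filled immediately before QSORT_ARR and cleared right after, because loc_compare is a qsort-style comparator that cannot take extra arguments and instead reads the file-scope bitset. A generic sketch of that idiom (hypothetical names, a plain int standing in for the bitset):

    #include <stdlib.h>

    static int g_pivot; /* implicit comparator parameter, like limited_registers */

    static int cmp_around_pivot(const void *a, const void *b)
    {
        int x = *(const int *)a, y = *(const int *)b;
        int xa = x >= g_pivot, ya = y >= g_pivot;
        if (xa != ya)            /* values below the pivot sort first,     */
            return xa - ya;      /* like values without limited requirements */
        return (x > y) - (x < y);
    }

    static void sort_around_pivot(int *vals, size_t n, int pivot)
    {
        g_pivot = pivot; /* fill the implicit parameter before sorting          */
        qsort(vals, n, sizeof(int), cmp_around_pivot);
        g_pivot = 0;     /* reset to the neutral default, like bitset_clear_all */
    }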
@@ -989,8 +996,8 @@ static void be_spill_belady(ir_graph *irg, const arch_register_class_t *rcls,
static void be_spill_belady_wrapper(ir_graph *irg, const arch_register_class_t *rcls,
const regalloc_if_t *regif)
{
- // First try, we can still give up :^)
- can_give_up = true;
+ // First try, we can still give up (if not forbidden) :^)
+ can_give_up = allow_giving_up;
has_given_up = false;
be_spill_belady(irg, rcls, regif);
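The context ends at the first call, but the can_give_up/has_given_up pair implies a two-pass protocol: a first attempt that may bail out, then a retry that must finish. A hedged sketch of how the wrapper presumably continues (the retry itself lies outside this diff):

    /* Hypothetical continuation, not shown in the hunk: if the first
       pass gave up, forbid giving up and run the spiller again. */
    if (has_given_up) {
        can_give_up = false;
        be_spill_belady(irg, rcls, regif);
    }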