summaryrefslogtreecommitdiffhomepage
diff options
context:
space:
mode:
authorMaximilian Stemmer-Grabow <mail@mxsg.de>2021-07-12 17:26:58 +0200
committerAndreas Fried <andreas.fried@kit.edu>2021-12-02 12:57:28 +0100
commit81ddcd60812dc146fd3d3f57b2cd2b1845a70c43 (patch)
treedbd27a9c3509a58bab510460b8eed251777eba7a
parentea1fafd0e1eecb3558c25280e2b36b9bddc8fde5 (diff)
Rebased: ignore pruned edges
-rw-r--r--ir/be/becopyheur4.c32
1 file changed, 29 insertions, 3 deletions
diff --git a/ir/be/becopyheur4.c b/ir/be/becopyheur4.c
index acc3ef4..96ba3ec 100644
--- a/ir/be/becopyheur4.c
+++ b/ir/be/becopyheur4.c
@@ -71,7 +71,7 @@ typedef struct aff_chunk_t {
const ir_node **interfere; /**< An ARR_F containing all inference. */
double weight; /**< Weight of this chunk */
unsigned id; /**< An id of this chunk. */
- unsigned visited;
+ unsigned visited; /**< Indicates when the chunk was last visited. */
list_head list;
bool weight_consistent : 1; /**< Set if the weight is consistent. */
@@ -141,6 +141,10 @@ typedef struct co_mst_irn_t {
static double scaled_edge_weight(const aff_edge_t * edge)
{
+ // Zero weight if the edge should be ignored
+ if(edge->pruned) {
+ return 0.0;
+ }
double result = (double)edge->unscaled_weight;
// Scale the weight down in case it is a compression-related edge
@@ -323,14 +327,18 @@ static int cmp_aff_edge(const void *a, const void *b)
const aff_edge_t *e1 = (const aff_edge_t*)a;
const aff_edge_t *e2 = (const aff_edge_t*)b;
- if (e2->weight == e1->weight) {
+ // Ignore weight for pruned edges
+ const double e1_weight = e1->pruned ? 0.0 : e1->weight;
+ const double e2_weight = e1->pruned ? 0.0 : e2->weight;
+
+ if (e1_weight == e2_weight) {
if (e2->src->node_idx == e1->src->node_idx)
return QSORT_CMP(e2->tgt->node_idx, e1->tgt->node_idx);
else
return QSORT_CMP(e2->src->node_idx, e1->src->node_idx);
}
/* sort in descending order */
- return QSORT_CMP(e2->weight, e1->weight);
+ return QSORT_CMP(e2_weight, e1_weight);
}
static int cmp_col_cost_gt(const void *a, const void *b)
@@ -607,8 +615,13 @@ static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c)
if (arch_irn_is_ignore(m))
continue;
+ // Ignore cost for pruned edges
+ if (neigh->pruned)
+ continue;
+
// Scale the cost according to the edge type
if (node_contains(c->n, m)) {
+
double cost = (double)neigh->costs;
if (neigh->aff_type == aff_edge_compression) {
@@ -671,6 +684,9 @@ static unsigned count_interfering_aff_neighs(co_mst_env_t *env,
if (arch_irn_is_ignore(n))
continue;
+ if (neigh->pruned)
+ continue;
+
/* check if the affinity neighbour interfere */
for (unsigned i = 0, n_neighs = node->n_neighs; i < n_neighs; ++i) {
if (node->int_neighs[i] == n) {
@@ -723,6 +739,8 @@ static void build_affinity_chunks(co_mst_env_t *env)
edge.unscaled_weight = neigh->costs;
edge.weight = edge.unscaled_weight * compression_cost_scale;
+ edge.pruned = false;
+
ARR_APP1(aff_edge_t, edges, edge);
}
}
@@ -808,6 +826,11 @@ static void expand_chunk_from(co_mst_env_t *env, co_mst_irn_t *node,
const ir_node *m = neigh->irn;
co_mst_irn_t *n2 = get_co_mst_irn(env, m);
unsigned m_idx = get_irn_idx(m);
+
+ // Ignore pruned edges
+ if (neigh->pruned)
+ continue;
+
if (!bitset_is_set(visited, m_idx)
&& decider(n2, col)
&& !n2->fixed
@@ -1144,7 +1167,10 @@ static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
/* compute color preference */
col_cost_t *order = ALLOCANZ(col_cost_t, env->n_regs);
+
+ // Number of chunks interfering with the chunk to be colored
unsigned n_int_chunks = 0;
+
for (size_t pos = 0, len = ARR_LEN(c->interfere); pos < len; ++pos) {
const ir_node *n = c->interfere[pos];
co_mst_irn_t *node = get_co_mst_irn(env, n);