author    Maximilian Stemmer-Grabow <mail@mxsg.de>  2021-05-19 14:27:09 +0200
committer Andreas Fried <andreas.fried@kit.edu>  2021-12-02 12:57:28 +0100
commit    0996d33d39672cf87d76fa6be94cd6ae17eb035e (patch)
tree      b7a13f557e51a05d2175897103b70ab17e150114
parent    8c992f43c3abe6a1d0577047b0bcf04b1d0f6bbb (diff)
Convert copy optimization heuristic chunk weights to floating point
This allows the weights of the edges that contribute to a chunk's weight to be scaled with sub-integer precision, which is required for finer-grained control over the weights of different edge types in affinity graphs. The heuristic uses these weights as priorities in priority queues, and the priority queue implementation in use only supports integer priorities. Where weights are very close and differ only by sub-integer amounts, we do not depend on their exact ordering in the queue, so truncating the weights to integer precision for the queue is acceptable.
-rw-r--r--  ir/be/becopyheur4.c | 48
1 file changed, 34 insertions(+), 14 deletions(-)
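Before the diff, a minimal self-contained sketch of the scheme the commit message describes (the enum, the scale value, and the weights below are hypothetical; the real code uses libfirm's aff_edge_t, pqueue_put() and the compression_cost_scale option): sum edge weights in floating point, then truncate to an integer only at the priority queue boundary.

#include <stdio.h>

/* Hypothetical stand-ins for the heuristic's types. */
typedef enum { aff_edge_normal, aff_edge_compression } aff_edge_type_t;

static const double compression_cost_scale = 0.5; /* assumed value */

/* Mirrors scaled_edge_weight(): keep full double precision here. */
static double scaled_weight(int unscaled_weight, aff_edge_type_t type)
{
	double result = (double)unscaled_weight;
	if (type == aff_edge_compression)
		result *= compression_cost_scale;
	return result;
}

int main(void)
{
	/* Sum the scaled edge weights of a chunk in floating point... */
	double chunk_weight = scaled_weight(3, aff_edge_compression)
	                    + scaled_weight(2, aff_edge_normal);

	/* ...and truncate only when handing the weight to the priority
	 * queue, where sub-integer differences do not affect the
	 * ordering we rely on. */
	int priority = (int)chunk_weight;

	printf("chunk weight = %f, queue priority = %d\n", chunk_weight, priority);
	return 0;
}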
diff --git a/ir/be/becopyheur4.c b/ir/be/becopyheur4.c
index b2266fd..c9d864f 100644
--- a/ir/be/becopyheur4.c
+++ b/ir/be/becopyheur4.c
@@ -68,7 +68,7 @@ typedef struct col_cost_t {
typedef struct aff_chunk_t {
const ir_node **n; /**< An ARR_F containing all nodes of the chunk. */
const ir_node **interfere; /**< An ARR_F containing all interfering nodes. */
- int weight; /**< Weight of this chunk */
+ double weight; /**< Weight of this chunk */
unsigned id; /**< An id of this chunk. */
unsigned visited;
list_head list;
@@ -90,8 +90,8 @@ typedef struct aff_chunk_t {
typedef struct aff_edge_t {
const ir_node *src; /**< Source node. */
const ir_node *tgt; /**< Target node. */
- int weight; /**< The weight of this edge. */
- double original_weight; /**< The unscaled weight of this edge. */
+ double weight; /**< The weight of this edge. */
+ int unscaled_weight; /**< The unscaled weight of this edge. */
aff_edge_type_t edge_type; /**< The type of affinity this edge describes. */
bool pruned : 1; /**< Set if this edge was pruned and should be ignored. */
} aff_edge_t;
@@ -134,7 +134,7 @@ typedef struct co_mst_irn_t {
static double scaled_edge_weight(const aff_edge_t * edge)
{
- double result = (double)edge->original_weight;
+ double result = (double)edge->unscaled_weight;
// Scale the weight down in case it is a compression-related edge
if(edge->edge_type == aff_edge_compression) {
@@ -219,7 +219,7 @@ static void dbg_aff_chunk(const co_mst_env_t *env, const aff_chunk_t *c)
{
(void)env;
if (c->weight_consistent)
- ir_fprintf(stderr, " $%d ", c->weight);
+ ir_fprintf(stderr, " $%f ", c->weight);
ir_fprintf(stderr, "{");
for (size_t i = 0, l = ARR_LEN(c->n); i < l; ++i) {
const ir_node *n = c->n[i];
@@ -324,7 +324,7 @@ static inline aff_chunk_t *new_aff_chunk(co_mst_env_t *env)
aff_chunk_t *c = XMALLOCF(aff_chunk_t, color_affinity, env->n_regs);
c->n = NEW_ARR_F(const ir_node *, 0);
c->interfere = NEW_ARR_F(const ir_node *, 0);
- c->weight = -1;
+ c->weight = -1.0;
c->weight_consistent = false;
#ifndef NDEBUG
c->deleted = false;
@@ -546,7 +546,7 @@ static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c)
}
int register_restr_count = 0;
- int w = 0;
+ double w = 0.0;
for (unsigned idx = 0, len = ARR_LEN(c->n); idx < len; ++idx) {
const ir_node *n = c->n[idx];
const affinity_node_t *an = get_affinity_info(env->co, n);
@@ -571,7 +571,16 @@ static void aff_chunk_assure_weight(co_mst_env_t *env, aff_chunk_t *c)
if (arch_irn_is_ignore(m))
continue;
- w += node_contains(c->n, m) ? neigh->costs : 0;
+ // Scale the cost according to the edge type
+ if (node_contains(c->n, m)) {
+ double cost = (double)neigh->costs;
+
+ if (neigh->aff_type == aff_edge_compression) {
+ cost *= compression_cost_scale;
+ }
+
+ w += cost;
+ }
}
}
}
@@ -654,8 +663,8 @@ static void build_affinity_chunks(co_mst_env_t *env)
* these weights are pure hackery ;-).
* It's not chriswue's fault but mine.
*/
- edge.original_weight = neigh->costs;
- edge.weight = MAX(1, edge.original_weight * compression_cost_scale);
+ edge.unscaled_weight = neigh->costs;
+ edge.weight = edge.unscaled_weight * compression_cost_scale;
ARR_APP1(aff_edge_t, edges, edge);
}
@@ -666,7 +675,7 @@ static void build_affinity_chunks(co_mst_env_t *env)
/* now: sort edges and build the affinity chunks */
QSORT_ARR(edges, cmp_aff_edge);
for (size_t i = 0, len = ARR_LEN(edges); i < len; ++i) {
- DBG((dbg, LEVEL_1, "edge (%u,%u) %f (original %f)\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight, edges[i].original_weight));
+ DBG((dbg, LEVEL_1, "edge (%u,%u) %f (unscaled %i)\n", edges[i].src->node_idx, edges[i].tgt->node_idx, edges[i].weight, edges[i].unscaled_weight));
aff_chunk_absorb(env, edges[i].src, edges[i].tgt);
}
@@ -679,7 +688,11 @@ static void build_affinity_chunks(co_mst_env_t *env)
DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
DBG((dbg, LEVEL_1, "\n"));
- pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
+ // Convert the floating point weight of the chunk to an integer
+ // so it can be used as a priority in this priority queue
+ // implementation. This may truncate the value, but we do not
+ // care about sub-integer precision for these weights.
+ pqueue_put(env->chunks, curr_chunk, (int)curr_chunk->weight);
}
for (size_t pn = 0, n = ARR_LEN(env->map.data); pn < n; ++pn) {
@@ -699,7 +712,9 @@ static void build_affinity_chunks(co_mst_env_t *env)
DBG_AFF_CHUNK(env, LEVEL_1, curr_chunk);
DBG((dbg, LEVEL_1, "\n"));
- pqueue_put(env->chunks, curr_chunk, curr_chunk->weight);
+ // See above: truncate the floating point weight to an integer for
+ // use in the priority queue.
+ pqueue_put(env->chunks, curr_chunk, (int)curr_chunk->weight);
}
DEL_ARR_F(edges);
@@ -1262,7 +1277,12 @@ static void color_aff_chunk(co_mst_env_t *env, aff_chunk_t *c)
expand_chunk_from(env, node, visited, new_chunk, c, decider_always_yes, 0);
aff_chunk_assure_weight(env, new_chunk);
- pqueue_put(env->chunks, new_chunk, new_chunk->weight);
+
+ // Convert the floating point weight of the chunk to an integer
+ // so it can be used as a priority in this priority queue
+ // implementation. This may truncate the value, but we do not
+ // care about sub-integer precision for these weights.
+ pqueue_put(env->chunks, new_chunk, (int)new_chunk->weight);
}
}