path: root/ir/be/beblocksched.h
author    Matthias Braun <matze@braunis.de>    2006-09-30 12:16:53 +0200
committer Matthias Braun <matze@braunis.de>    2006-09-30 12:16:53 +0200
commit    97927c8e372f337e8342b698072facf4ffa453ad (patch)
tree      3d55fb5a216785b65627dbdc91edce0d7776e1df /ir/be/beblocksched.h
parent    8d3747b480395d0097ef78e4a764c86f69cfcc7b (diff)
- Added two new block schedulers: a greedy algorithm and an "optimal" ILP formulation.
Both try to turn as many jumps as possible into fallthroughs, weighted by execution frequency. The results are better than the extbb block scheduler most of the time. However, x86 branch prediction seems to treat conditional backward jumps as always taken, so for 50/50 jumps we have to make sure not to create backward jumps (nothing is implemented for this yet).
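As a rough illustration of the greedy idea described in the message (this is not the code added by this commit; the plain block indices, the edge_t struct, and all helper names below are invented for the sketch), one can sort the CFG edges by execution frequency and commit the hottest ones as fallthroughs, as long as no block gets a second layout predecessor or successor and no chain is closed into a cycle:

	/* Sketch of a greedy fallthrough scheduler. Hypothetical data model:
	 * blocks are plain indices 0..n_blocks-1, edges carry an execution
	 * frequency. The real code works on ir_node/ir_exec_freq instead. */
	#include <stdlib.h>

	typedef struct edge_t {
		int    src, dst;   /* block indices of the jump src -> dst */
		double freq;       /* execution frequency of that jump */
	} edge_t;

	static int cmp_freq(const void *a, const void *b)
	{
		double fa = ((const edge_t*)a)->freq, fb = ((const edge_t*)b)->freq;
		return (fa < fb) - (fa > fb);   /* sort descending by frequency */
	}

	/* order[] must have room for n_blocks entries and receives the layout */
	static void greedy_layout(int n_blocks, edge_t *edges, int n_edges, int *order)
	{
		int *succ = malloc(n_blocks * sizeof(int));  /* chosen fallthrough succ, -1 if none */
		int *pred = malloc(n_blocks * sizeof(int));  /* chosen fallthrough pred, -1 if none */
		int *head = malloc(n_blocks * sizeof(int));  /* chain head, maintained for chain tails */
		int *tail = malloc(n_blocks * sizeof(int));  /* chain tail, maintained for chain heads */
		for (int i = 0; i < n_blocks; ++i) {
			succ[i] = pred[i] = -1;
			head[i] = tail[i] = i;   /* every block starts as its own chain */
		}

		/* commit the most frequently executed edges as fallthroughs first,
		 * skipping edges that would give a block two layout neighbours or
		 * close a chain into a cycle */
		qsort(edges, n_edges, sizeof(edges[0]), cmp_freq);
		for (int i = 0; i < n_edges; ++i) {
			int s = edges[i].src, d = edges[i].dst;
			if (succ[s] >= 0 || pred[d] >= 0 || head[s] == d)
				continue;
			succ[s] = d;
			pred[d] = s;
			/* merge the chains: the chain headed by head[s] now ends at tail[d] */
			tail[head[s]] = tail[d];
			head[tail[d]] = head[s];
		}

		/* emit the chains: every block without a fallthrough predecessor
		 * starts a chain, follow succ[] until the chain ends */
		int pos = 0;
		for (int b = 0; b < n_blocks; ++b)
			if (pred[b] < 0)
				for (int cur = b; cur >= 0; cur = succ[cur])
					order[pos++] = cur;

		free(succ); free(pred); free(head); free(tail);
	}

The ILP variant mentioned above would replace this greedy edge selection with an exact optimization over the same "maximize fallthrough frequency" objective.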
Diffstat (limited to 'ir/be/beblocksched.h')
-rw-r--r--  ir/be/beblocksched.h  21
1 file changed, 21 insertions, 0 deletions
diff --git a/ir/be/beblocksched.h b/ir/be/beblocksched.h
new file mode 100644
index 0000000..3c8a345
--- /dev/null
+++ b/ir/be/beblocksched.h
@@ -0,0 +1,21 @@
+/*
+ * Block schedule calculator
+ *
+ * $Id$
+ */
+#ifndef _BEBLOCKSCHED_H
+#define _BEBLOCKSCHED_H
+
+#include "obst.h"
+#include "execfreq.h"
+#include "irnode.h"
+#include "irgraph.h"
+
+ir_node **be_create_block_schedule(ir_graph *irg, ir_exec_freq *execfreqs);
+
+#ifdef WITH_LIBCORE
+#include <libcore/lc_opts.h>
+void be_block_schedule_register_options(lc_opt_entry_t *grp);
+#endif
+
+#endif
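
For context, a hypothetical caller of the new interface might look like the following. This is not part of the commit: the emit_block() and get_block_count() hooks are placeholders, and the sketch assumes the returned array holds all of the graph's blocks in their final layout order.

	#include <stddef.h>
	#include "beblocksched.h"

	/* placeholders invented for this sketch, not part of the new interface */
	extern void   emit_block(ir_node *block);
	extern size_t get_block_count(ir_graph *irg);

	static void emit_in_layout_order(ir_graph *irg, ir_exec_freq *execfreqs)
	{
		/* ask the block scheduler for the final block layout ... */
		ir_node **schedule = be_create_block_schedule(irg, execfreqs);

		/* ... and emit the blocks in exactly that order, so the jumps the
		 * scheduler turned into fallthroughs need no branch instruction */
		size_t n_blocks = get_block_count(irg);
		for (size_t i = 0; i < n_blocks; ++i)
			emit_block(schedule[i]);
	}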