diff --git a/m1/perf/sched.c b/m1/perf/sched.c
index 9b6e223..e9e5097 100644
--- a/m1/perf/sched.c
+++ b/m1/perf/sched.c
@@ -44,7 +44,7 @@
 
 #define MAX_LATENCY 8 /* maximum latency; okay to make this bigger */
 
-#define FIELD(w) (((pfpu_instruction *) &(w))->i)
+#define CODE(n) (((pfpu_instruction *) (code+(n)))->i)
 
 
 struct list {
@@ -65,7 +65,7 @@
 	int unresolved;		/* number of data refs we need before we can sched */
 	int earliest;		/* earliest cycle dependencies seen so far are met */
 	struct list dependants;	/* list of dependencies (constant) */
-	int num_dependants;	/* number of unresolved dependencies */
+	int num_dependants;	/* number of dependencies */
 #ifdef LCPF
 	int distance;		/* minimum cycles on this path until the end */
 #endif
@@ -221,7 +221,7 @@
 
 /* ----- Register management ----------------------------------------------- */
 
-static int reg2idx(int reg)
+static int vm_reg2idx(int reg)
 {
 	return reg >= 0 ? reg : sc->frag->nbindings-reg;
 }
@@ -250,7 +250,7 @@
 
 	Dprintf(" alloc reg %d -> %d\n", vm_reg, pfpu_reg);
 
-	vm_idx = reg2idx(vm_reg);
+	vm_idx = vm_reg2idx(vm_reg);
 	sc->regs[vm_idx].setter = setter;
 	sc->regs[vm_idx].pfpu_reg = pfpu_reg;
 	sc->regs[vm_idx].refs = setter->num_dependants+1;
@@ -266,7 +266,7 @@
 
 	if (vm_reg >= 0)
 		return;
-	vm_idx = reg2idx(vm_reg);
+	vm_idx = vm_reg2idx(vm_reg);
 	assert(sc->regs[vm_idx].refs);
 	if (--sc->regs[vm_idx].refs)
 		return;
@@ -300,7 +300,7 @@
 
 static int lookup_pfpu_reg(int vm_reg)
 {
-	return vm_reg >= 0 ? vm_reg : sc->regs[reg2idx(vm_reg)].pfpu_reg;
+	return vm_reg >= 0 ? vm_reg : sc->regs[vm_reg2idx(vm_reg)].pfpu_reg;
 }
 
 
@@ -344,7 +344,7 @@
 {
 	struct vm_reg *reg;
 
-	reg = sc->regs+reg2idx(reg_num);
+	reg = sc->regs+vm_reg2idx(reg_num);
 	ref->insn = insn;
 	ref->dep = reg->setter;
 	if (ref->dep) {
@@ -427,11 +427,11 @@
 	    case 3:
 		/* fall through */
 	    case 2:
-		FIELD(code[cycle]).opb = lookup_pfpu_reg(insn->vm_insn->opb);
+		CODE(cycle).opb = lookup_pfpu_reg(insn->vm_insn->opb);
 		put_reg_by_setter(insn->opb.dep);
 		/* fall through */
 	    case 1:
-		FIELD(code[cycle]).opa = lookup_pfpu_reg(insn->vm_insn->opa);
+		CODE(cycle).opa = lookup_pfpu_reg(insn->vm_insn->opa);
 		put_reg_by_setter(insn->opa.dep);
 		break;
 	    case 0:
@@ -440,8 +440,8 @@
 		abort();
 	}
 
-	FIELD(code[end]).dest = alloc_reg(insn);
-	FIELD(code[cycle]).opcode = fpvm_to_pfpu(insn->vm_insn->opcode);
+	CODE(end).dest = alloc_reg(insn);
+	CODE(cycle).opcode = fpvm_to_pfpu(insn->vm_insn->opcode);
 
 	foreach (ref, &insn->dependants) {
 		if (ref->insn->earliest <= end)
@@ -496,7 +496,7 @@
 			end = i+insn->latency;
 			if (end >= PFPU_PROGSIZE)
 				return -1;
-			if (!FIELD(code[end]).dest) {
+			if (!CODE(end).dest) {
 #ifdef LCPF
 				if (!best || best->distance < insn->distance)
 					best = insn;
@@ -515,8 +515,8 @@
 			remaining--;
 		}
 #endif
-		if (FIELD(code[i]).dest)
-			put_reg(sc->pfpu_regs[FIELD(code[i]).dest].vm_reg);
+		if (CODE(i).dest)
+			put_reg(sc->pfpu_regs[CODE(i).dest].vm_reg);
 	}
 
 	/*
@@ -527,7 +527,7 @@
 	if (end > PFPU_PROGSIZE)
 		end = PFPU_PROGSIZE;
 	while (i != end) {
-		if (FIELD(code[i]).dest)
+		if (CODE(i).dest)
 			last = i+1;
 		i++;
 	}