mirror of git://projects.qi-hardware.com/wernermisc.git, synced 2024-12-18 12:07:30 +02:00
m1/perf/sched.c: synchronize with upstream version
parent 8b4ef502d7
commit 9e32d56ce9

m1/perf/sched.c (225 lines changed)
@@ -1,7 +1,7 @@
 /*
- * sched.c - O(n) ... O(n^2) scheduler
+ * lnfpus.c - O(n) ... O(n^2) scheduler
  *
- * Written 2011 by Werner Almesberger
+ * Copyright (C) 2011 Werner Almesberger
  *
- * Based on gfpus.c
+ * Copyright (C) 2007, 2008, 2009, 2010 Sebastien Bourdeauducq
@@ -123,7 +123,7 @@ static void get_registers(struct fpvm_fragment *fragment,
 	} fconv;
 
 	for(i = 0; i < fragment->nbindings; i++)
-		if (fragment->bindings[i].isvar)
+		if(fragment->bindings[i].isvar)
 			registers[i] = 0;
 		else {
 			fconv.f = fragment->bindings[i].b.c;
@@ -176,7 +176,7 @@ static void *list_pop(struct list *list)
 	struct list *first;
 
 	first = list->next;
-	if (first == list)
+	if(first == list)
 		return NULL;
 	list_del(first);
 	return first;
@@ -203,7 +203,7 @@ static void list_add(struct list *list, struct list *item)
 
 static void list_concat(struct list *a, struct list *b)
 {
-	if (b->next != b) {
+	if(b->next != b) {
 		a->prev->next = b->next;
 		b->next->prev = a->prev;
 		b->prev->next = a;
@@ -218,7 +218,7 @@ static void list_concat(struct list *a, struct list *b)
  */
 
 #define foreach(var, head) \
-	for (var = (void *) ((head))->next; \
+	for(var = (void *) ((head))->next; \
 	    (var) != (void *) (head); \
 	    var = (void *) ((struct list *) (var))->next)
 
@@ -238,17 +238,17 @@ static int alloc_reg(struct insn *setter)
 	int vm_reg, pfpu_reg, vm_idx;
 
 	vm_reg = setter->vm_insn->dest;
-	if (vm_reg >= 0) {
+	if(vm_reg >= 0) {
 		pfpu_reg = vm_reg;
 		sc->pfpu_regs[vm_reg].vm_reg = vm_reg; /* @@@ global init */
 	} else {
 		reg = list_pop(&sc->unallocated);
-		if (!reg)
+		if(!reg)
 			return -1;
 
 #ifdef REG_STATS
 		sc->curr_regs++;
-		if (sc->curr_regs > sc->max_regs)
+		if(sc->curr_regs > sc->max_regs)
 			sc->max_regs = sc->curr_regs;
 #endif
 
@@ -272,14 +272,14 @@ static void put_reg(int vm_reg)
 	int vm_idx;
 	struct vm_reg *reg;
 
-	if (vm_reg >= 0)
+	if(vm_reg >= 0)
 		return;
 
 	vm_idx = vm_reg2idx(vm_reg);
 	reg = sc->regs+vm_idx;
 
 	assert(reg->refs);
-	if (--reg->refs)
+	if(--reg->refs)
 		return;
 
 	Dprintf(" free reg %d\n", reg->pfpu_reg);
@@ -309,7 +309,7 @@ static int lookup_pfpu_reg(int vm_reg)
 
 static void mark(int vm_reg)
 {
-	if (vm_reg > 0)
+	if(vm_reg > 0)
 		sc->pfpu_regs[vm_reg].used = 1;
 }
 
@@ -319,22 +319,17 @@ static int init_registers(struct fpvm_fragment *frag,
 {
 	int i;
 
-	sc->regs =
-	    calloc(frag->nbindings-frag->next_sur, sizeof(struct vm_reg));
-	if (!sc->regs)
-		return -1;
-
 	get_registers(frag, registers);
 
-	for (i = 0; i != frag->ninstructions; i++) {
+	for(i = 0; i != frag->ninstructions; i++) {
 		mark(frag->code[i].opa);
 		mark(frag->code[i].opb);
 		mark(frag->code[i].dest);
 	}
 
 	list_init(&sc->unallocated);
-	for (i = PFPU_SPREG_COUNT; i != PFPU_REG_COUNT; i++)
-		if (!sc->pfpu_regs[i].used)
+	for(i = PFPU_SPREG_COUNT; i != PFPU_REG_COUNT; i++)
+		if(!sc->pfpu_regs[i].used)
 			list_add_tail(&sc->unallocated, &sc->pfpu_regs[i].more);
 
 	return 0;
@@ -352,9 +347,9 @@ static struct vm_reg *add_data_ref(struct insn *insn, struct data_ref *ref,
 	reg = sc->regs+vm_reg2idx(reg_num);
 	ref->insn = insn;
 	ref->dep = reg->setter;
-	if (insn->vm_insn->dest == reg_num)
+	if(insn->vm_insn->dest == reg_num)
 		insn->rmw = 1;
-	if (!ref->dep)
+	if(!ref->dep)
 		reg->refs++;
 	else {
 		list_add_tail(&ref->dep->dependants, &ref->more);
@@ -378,45 +373,45 @@ static void init_scheduler(struct fpvm_fragment *frag)
 
 	list_init(&sc->unscheduled);
 	list_init(&sc->waiting);
-	for (i = 0; i != PFPU_PROGSIZE; i++)
+	for(i = 0; i != PFPU_PROGSIZE; i++)
 		list_init(sc->ready+i);
 
-	for (i = 0; i != frag->ninstructions; i++) {
+	for(i = 0; i != frag->ninstructions; i++) {
 		insn = sc->insns+i;
 		insn->vm_insn = frag->code+i;
 		insn->arity = fpvm_get_arity(frag->code[i].opcode);
 		insn->latency = pfpu_get_latency(frag->code[i].opcode);
 		list_init(&insn->dependants);
 		switch (insn->arity) {
-		case 3:
-			add_data_ref(insn, &insn->cond, FPVM_REG_IFB);
-			/* fall through */
-		case 2:
-			add_data_ref(insn, &insn->opb, frag->code[i].opb);
-			/* fall through */
-		case 1:
-			add_data_ref(insn, &insn->opa, frag->code[i].opa);
-			/* fall through */
-		case 0:
-			reg = sc->regs+vm_reg2idx(frag->code[i].dest);
-			if (reg->setter) {
-				reg->setter->next_setter = insn;
-				foreach (ref, &reg->setter->dependants)
-					if (ref->insn != insn)
-						insn->unresolved++;
-				if (!insn->rmw)
-					insn->unresolved++;
-			} else {
-				if (!insn->rmw)
-					insn->unresolved += reg->refs;
-				reg->first_setter = insn;
-			}
-			reg->setter = insn;
-			break;
-		default:
-			abort();
+		case 3:
+			add_data_ref(insn, &insn->cond, FPVM_REG_IFB);
+			/* fall through */
+		case 2:
+			add_data_ref(insn, &insn->opb, frag->code[i].opb);
+			/* fall through */
+		case 1:
+			add_data_ref(insn, &insn->opa, frag->code[i].opa);
+			/* fall through */
+		case 0:
+			reg = sc->regs+vm_reg2idx(frag->code[i].dest);
+			if(reg->setter) {
+				reg->setter->next_setter = insn;
+				foreach(ref, &reg->setter->dependants)
+					if(ref->insn != insn)
+						insn->unresolved++;
+				if(!insn->rmw)
+					insn->unresolved++;
+			} else {
+				if(!insn->rmw)
+					insn->unresolved += reg->refs;
+				reg->first_setter = insn;
+			}
+			reg->setter = insn;
+			break;
+		default:
+			abort();
 		}
-		if (insn->unresolved)
+		if(insn->unresolved)
 			list_add_tail(&sc->unscheduled, &insn->more);
 		else
 			list_add_tail(&sc->ready[0], &insn->more);
@@ -425,7 +420,7 @@ static void init_scheduler(struct fpvm_fragment *frag)
 #ifdef LCPF
 	struct data_ref *dep;
 
-	for (i = frag->ninstructions-1; i >= 0; i--) {
+	for(i = frag->ninstructions-1; i >= 0; i--) {
 		insn = sc->insns+i;
 #if 0
 		/*
@@ -435,15 +430,15 @@ static void init_scheduler(struct fpvm_fragment *frag)
 		 * This is a degenerate case that's probably not worth
 		 * spending much effort on.
 		 */
-		if (insn->next_setter) {
+		if(insn->next_setter) {
 			insn->distance =
 			    insn->next_setter->distance-insn->distance+1;
-			if (insn->distance < 1)
+			if(insn->distance < 1)
 				insn->distance = 1;
 		}
 #endif
-		foreach (dep, &insn->dependants)
-			if (dep->insn->distance > insn->distance)
+		foreach(dep, &insn->dependants)
+			if(dep->insn->distance > insn->distance)
 				insn->distance = dep->insn->distance;
 		/*
 		 * While it would be more correct to add one for the cycle
@@ -463,12 +458,12 @@ static void unblock(struct insn *insn)
 	int slot;
 
 	assert(insn->unresolved);
-	if (--insn->unresolved)
+	if(--insn->unresolved)
 		return;
 	Dprintf(" unblocked %lu -> %u\n", insn-sc->insns, insn->earliest);
 	list_del(&insn->more);
 	slot = insn->earliest;
-	if (slot <= sc->cycle)
+	if(slot <= sc->cycle)
 		slot = sc->cycle+1;
 	list_add_tail(sc->ready+slot, &insn->more);
 }
@@ -479,13 +474,13 @@ static void put_reg_by_ref(struct data_ref *ref, int vm_reg)
 	struct insn *setter = ref->dep;
 	struct vm_reg *reg;
 
-	if (setter) {
+	if(setter) {
 		put_reg(setter->vm_insn->dest);
-		if (setter->next_setter && setter->next_setter != ref->insn)
+		if(setter->next_setter && setter->next_setter != ref->insn)
 			unblock(setter->next_setter);
 	} else {
 		reg = sc->regs+vm_reg2idx(vm_reg);
-		if (reg->first_setter && !reg->first_setter->rmw)
+		if(reg->first_setter && !reg->first_setter->rmw)
 			unblock(reg->first_setter);
 	}
 }
@@ -493,7 +488,7 @@ static void put_reg_by_ref(struct data_ref *ref, int vm_reg)
 
 static void unblock_after(struct insn *insn, int cycle)
 {
-	if (insn->earliest <= cycle)
+	if(insn->earliest <= cycle)
 		insn->earliest = cycle+1;
 	unblock(insn);
 }
@@ -511,32 +506,32 @@ static int issue(struct insn *insn, unsigned *code)
 	    insn->vm_insn->opb);
 
 	switch (insn->arity) {
-	case 3:
-		put_reg_by_ref(&insn->cond, FPVM_REG_IFB);
-		/* fall through */
-	case 2:
-		CODE(sc->cycle).opb = lookup_pfpu_reg(insn->vm_insn->opb);
-		put_reg_by_ref(&insn->opb, insn->vm_insn->opb);
-		/* fall through */
-	case 1:
-		CODE(sc->cycle).opa = lookup_pfpu_reg(insn->vm_insn->opa);
-		put_reg_by_ref(&insn->opa, insn->vm_insn->opa);
-		break;
-	case 0:
-		break;
-	default:
-		abort();
+	case 3:
+		put_reg_by_ref(&insn->cond, FPVM_REG_IFB);
+		/* fall through */
+	case 2:
+		CODE(sc->cycle).opb = lookup_pfpu_reg(insn->vm_insn->opb);
+		put_reg_by_ref(&insn->opb, insn->vm_insn->opb);
+		/* fall through */
+	case 1:
+		CODE(sc->cycle).opa = lookup_pfpu_reg(insn->vm_insn->opa);
+		put_reg_by_ref(&insn->opa, insn->vm_insn->opa);
+		break;
+	case 0:
+		break;
+	default:
+		abort();
 	}
 
 	reg = alloc_reg(insn);
-	if (reg < 0)
+	if(reg < 0)
 		return -1;
 	CODE(end).dest = reg;
 	CODE(sc->cycle).opcode = fpvm_to_pfpu(insn->vm_insn->opcode);
 
-	foreach (ref, &insn->dependants)
+	foreach(ref, &insn->dependants)
 		unblock_after(ref->insn, end);
-	if (insn->next_setter && !insn->next_setter->rmw)
+	if(insn->next_setter && !insn->next_setter->rmw)
 		unblock_after(insn->next_setter,
 		    end-insn->next_setter->latency);
 
@@ -550,7 +545,7 @@ static int count(const struct list *list)
 	int n = 0;
 	const struct list *p;
 
-	for (p = list->next; p != list; p = p->next)
+	for(p = list->next; p != list; p = p->next)
 		n++;
 	return n;
 }
@@ -565,8 +560,8 @@ static int schedule(unsigned int *code)
 	struct insn *best;
 
 	remaining = sc->frag->ninstructions;
-	for (i = 0; remaining; i++) {
-		if (i == PFPU_PROGSIZE)
+	for(i = 0; remaining; i++) {
+		if(i == PFPU_PROGSIZE)
 			return -1;
 
 		sc->cycle = i;
@@ -575,13 +570,13 @@ static int schedule(unsigned int *code)
 
 		list_concat(&sc->waiting, sc->ready+i);
 		best = NULL;
-		foreach (insn, &sc->waiting) {
+		foreach(insn, &sc->waiting) {
 			end = i+insn->latency;
-			if (end >= PFPU_PROGSIZE)
+			if(end >= PFPU_PROGSIZE)
 				return -1;
-			if (!CODE(end).dest) {
+			if(!CODE(end).dest) {
 #ifdef LCPF
-				if (!best || best->distance < insn->distance)
+				if(!best || best->distance < insn->distance)
 					best = insn;
 #else
 				best = insn;
@@ -589,13 +584,13 @@ static int schedule(unsigned int *code)
 #endif
 			}
 		}
-		if (best) {
-			if (issue(best, code) < 0)
+		if(best) {
+			if(issue(best, code) < 0)
 				return -1;
 			list_del(&best->more);
 			remaining--;
 		}
-		if (CODE(i).dest)
+		if(CODE(i).dest)
 			put_reg(sc->pfpu_regs[CODE(i).dest].vm_reg);
 	}
 
@@ -604,10 +599,10 @@ static int schedule(unsigned int *code)
 	 */
 	last = i;
 	end = i+MAX_LATENCY;
-	if (end > PFPU_PROGSIZE)
+	if(end > PFPU_PROGSIZE)
 		end = PFPU_PROGSIZE;
-	while (i != end) {
-		if (CODE(i).dest)
+	while(i != end) {
+		if(CODE(i).dest)
 			last = i+1;
 		i++;
 	}
@@ -615,33 +610,29 @@ static int schedule(unsigned int *code)
 }
 
 
-static int init_scheduler_context(struct fpvm_fragment *frag,
-    unsigned int *reg)
-{
-	sc = calloc(1, sizeof(*sc));
-	if (!sc)
-		return -1;
-
-	sc->frag = frag;
-
-	if (init_registers(frag, reg) < 0) {
-		free(sc);
-		return -1;
-	}
-
-	init_scheduler(frag);
-	return 0;
-}
-
-
 int gfpus_schedule(struct fpvm_fragment *frag, unsigned int *code,
     unsigned int *reg)
 {
+	/*
+	 * allocate context and registers on stack because standalone FN has no
+	 * memory allocator
+	 */
+	struct sched_ctx sc_alloc;
+	struct vm_reg regs[frag->nbindings-frag->next_sur];
 	pfpu_instruction vecout;
 	int res;
 
-	if (init_scheduler_context(frag, reg) < 0)
+	printf("greetings %lu %lu\n", sizeof(*sc), sizeof(regs));
+	sc = &sc_alloc;
+	memset(sc, 0, sizeof(*sc));
+	sc->frag = frag;
+	sc->regs = regs;
+	memset(regs, 0, sizeof(regs));
+
+	if(init_registers(frag, reg) < 0)
		return -1;
+	init_scheduler(frag);
 
 	memset(code, 0, PFPU_PROGSIZE*sizeof(*code));
 	res = schedule(code);
@@ -649,13 +640,11 @@ int gfpus_schedule(struct fpvm_fragment *frag, unsigned int *code,
 	printf("regs: %d/%d\n", sc->curr_regs, sc->max_regs);
 #endif
 
-	free(sc->regs);
-	free(sc);
-	if (res < 0)
+	if(res < 0)
 		return res;
-	if (frag->vector_mode)
+	if(frag->vector_mode)
 		return res;
-	if (res == PFPU_PROGSIZE)
+	if(res == PFPU_PROGSIZE)
 		return -1;
 
 	vecout.w = 0;