#pypp 0
#define ARCH
#include "kernel.hh"
void Thread_arch_init (Thread *thread):
	thread->arch.at = 0
	thread->arch.v0 = 0
	thread->arch.v1 = 0
	thread->arch.a0 = 0
	thread->arch.a1 = 0
	thread->arch.a2 = 0
	thread->arch.a3 = 0
	thread->arch.t0 = 0
	thread->arch.t1 = 0
	thread->arch.t2 = 0
	thread->arch.t3 = 0
	thread->arch.t4 = 0
	thread->arch.t5 = 0
	thread->arch.t6 = 0
	thread->arch.t7 = 0
	thread->arch.t8 = 0
	thread->arch.t9 = 0
	thread->arch.gp = 0
	thread->arch.fp = 0
	thread->arch.ra = 0
	thread->arch.hi = 0
	thread->arch.lo = 0
	thread->arch.k0 = 0
	thread->arch.k1 = 0

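// Deliver a received message to a thread: the four capability pointers go
// into a0-a3, the four data words into t0-t3, and v0 is set to 1 so the
// thread can see that the receive succeeded.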
void Thread_arch_receive (Thread *thread, unsigned d[4], Capability *c[4]):
	thread->arch.a0 = (unsigned)c[0]
	thread->arch.a1 = (unsigned)c[1]
	thread->arch.a2 = (unsigned)c[2]
	thread->arch.a3 = (unsigned)c[3]
	thread->arch.t0 = d[0]
	thread->arch.t1 = d[1]
	thread->arch.t2 = d[2]
	thread->arch.t3 = d[3]
	thread->arch.v0 = 1

void Thread_arch_receive_fail (Thread *thread):
	thread->arch.v0 = 0

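// Return a pointer to a saved thread register, addressed by its MIPS
// register number (1-31); unknown numbers yield NULL.  Register 29 (sp) is
// stored in the generic Thread, not in the arch part.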
unsigned *Thread_arch_info (Thread *thread, unsigned num):
	switch num:
		case 1:
			return &thread->arch.at
		case 2:
			return &thread->arch.v0
		case 3:
			return &thread->arch.v1
		case 4:
			return &thread->arch.a0
		case 5:
			return &thread->arch.a1
		case 6:
			return &thread->arch.a2
		case 7:
			return &thread->arch.a3
		case 8:
			return &thread->arch.t0
		case 9:
			return &thread->arch.t1
		case 10:
			return &thread->arch.t2
		case 11:
			return &thread->arch.t3
		case 12:
			return &thread->arch.t4
		case 13:
			return &thread->arch.t5
		case 14:
			return &thread->arch.t6
		case 15:
			return &thread->arch.t7
		case 16:
			return &thread->arch.s0
		case 17:
			return &thread->arch.s1
		case 18:
			return &thread->arch.s2
		case 19:
			return &thread->arch.s3
		case 20:
			return &thread->arch.s4
		case 21:
			return &thread->arch.s5
		case 22:
			return &thread->arch.s6
		case 23:
			return &thread->arch.s7
		case 24:
			return &thread->arch.t8
		case 25:
			return &thread->arch.t9
		case 26:
			return &thread->arch.k0
		case 27:
			return &thread->arch.k1
		case 28:
			return &thread->arch.gp
		case 29:
			return &thread->sp
		case 30:
			return &thread->arch.fp
		case 31:
			return &thread->arch.ra
		default:
			return NULL

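// Address space bookkeeping.  The directory is a single page indexed by
// virtual address bits 31-21.  Each leaf table is also one page: the low 512
// words hold TLB EntryLo values, the words at offset 0x200 hold pointers to
// the corresponding arch_page records.  The shadow array mirrors the
// directory and points at the per-table arch_page_table lists.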
void Memory_arch_init (Memory *mem):
	mem->arch.asid = 1
	mem->arch.directory = NULL
	mem->arch.shadow = NULL

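// Remove all TLB entries tagged with the given asid.  Matching entries are
// rewritten with asid 0 (only used by the idle task) and a distinct address,
// so they can never match again.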
static void flush_tlb (unsigned asid):
	for unsigned tlb = 1; tlb < 32; ++tlb:
		cp0_set (CP0_INDEX, tlb)
		__asm__ volatile ("tlbr")
		unsigned hi
		cp0_get (CP0_ENTRY_HI, hi)
		if (hi & 0x1f) == asid:
			// Set asid to 0, which is only used by the idle task.
			cp0_set (CP0_ENTRY_HI, 0x2000 * tlb)
			__asm__ volatile ("tlbwi")

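// Tear down an address space: unmap everything, flush its TLB entries and
// return its asid to the free list (asids[0] is the head; each free slot
// stores the next free asid).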
void Memory_arch_free (Memory *mem):
	while mem->arch.first_page_table:
		mem->unmap (mem->arch.first_page_table->first_page->page, mem->arch.first_page_table->first_page->mapping)
	if (Memory *)asids[mem->arch.asid] == mem:
		flush_tlb (mem->arch.asid)
		asids[mem->arch.asid] = asids[0]
		asids[0] = mem->arch.asid
	mem->unuse ()
	mem->zfree ((unsigned)mem->arch.directory)

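// Per-address-space bookkeeping objects: an arch_page_table heads the list
// of arch_page records for one leaf table.  Both are small objects obtained
// from the owning Memory with search_free and released with free_obj.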
static arch_page_table *alloc_page_table (Memory *mem):
	arch_page_table *ret = (arch_page_table *)mem->search_free (sizeof (arch_page_table), (void **)&mem->arch.first_page_table)
	if !ret:
		return NULL
	ret->first_page = NULL
	return ret

static arch_page *alloc_page (Memory *mem, arch_page_table *t):
	arch_page *ret = (arch_page *)mem->search_free (sizeof (arch_page), (void **)&t->first_page)
	if !ret:
		return NULL
	ret->page = NULL
	ret->mapping = ~0
	ret->prev_mapped = NULL
	ret->next_mapped = NULL
	return ret

static void free_page_table (arch_page_table *t, unsigned idx):
	Memory *mem = t->address_space
	if t->next:
		t->next->prev = t->prev
	if t->prev:
		t->prev->next = t->next
	else:
		mem->arch.first_page_table = t->next
	mem->zfree ((unsigned)mem->arch.directory[idx])
	mem->arch.directory[idx] = NULL
	mem->arch.shadow[idx] = NULL
	mem->free_obj (t)
	if !mem->arch.first_page_table:
		mem->zfree ((unsigned)mem->arch.directory)
		mem->zfree ((unsigned)mem->arch.shadow)
		mem->arch.directory = NULL
		mem->arch.shadow = NULL

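// If the given page is currently in the TLB, replace its EntryLo value
// (0 invalidates it).  The probe matches on virtual address and asid; bit
// PAGE_BITS of the address selects the odd or even half of the entry pair.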
static void tlb_reset (unsigned address, unsigned asid, unsigned value):
	cp0_set (CP0_ENTRY_HI, address | asid)
	__asm__ volatile ("tlbp")
	unsigned idx
	cp0_get (CP0_INDEX, idx)
	if ~idx & 0x80000000:
		if address & (1 << PAGE_BITS):
			cp0_set (CP0_ENTRY_LO1, value)
		else:
			cp0_set (CP0_ENTRY_LO0, value)
		__asm__ volatile ("tlbwi")

static void free_page (arch_page_table *t, arch_page *p):
	if p->next:
		p->next->prev = p->prev
	if p->prev:
		p->prev->next = p->next
	else:
		t->first_page = p->next
	if p->prev_mapped:
		p->prev_mapped->next_mapped = p->next_mapped
	else:
		p->page->arch.first_mapped = p->next_mapped
	if p->next_mapped:
		p->next_mapped->prev_mapped = p->prev_mapped
	// Mask off the write flag in bit 0 of the mapping before using it as an address.
	tlb_reset (p->mapping & ~1, p->address_space->arch.asid, 0)
	unsigned idx = p->mapping >> 21
	p->address_space->free_obj (p)
	if !t->first_page:
		free_page_table (t, idx)

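// Build the TLB EntryLo value for a page: the physical frame number goes in
// bits 6 and up, 0x18 selects a cacheable coherency attribute, 0x4 is the
// dirty (write enable) bit and 0x2 the valid bit.  A page without a frame
// gets an all-zero, invalid entry.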
static unsigned make_entry_lo (Page *page, bool write):
	if !page->data.frame:
		return 0
	unsigned flags
	if write:
		flags = 0x18 | 0x4 | 0x2
	else:
		flags = 0x18 | 0x2
	return ((page->data.frame & ~0x80000000) >> 6) | flags

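// Map a page at a virtual address, building the page tables on demand.  The
// writable flag is remembered in bit 0 of arch_page::mapping, which is
// otherwise unused because mappings are page-aligned.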
bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write):
	if address >= 0x80000000:
		return false
	address &= PAGE_MASK
	if !mem->arch.directory:
		mem->arch.directory = (unsigned **)mem->zalloc ()
		if !mem->arch.directory:
			return false
		mem->arch.shadow = (arch_page_table **)mem->zalloc ()
		if !mem->arch.shadow:
			mem->zfree ((unsigned)mem->arch.directory)
			mem->arch.directory = NULL
			return false
	unsigned *table = mem->arch.directory[address >> 21]
	arch_page_table *t = mem->arch.shadow[address >> 21]
	if !table:
		table = (unsigned *)mem->zalloc ()
		if !table:
			if !mem->arch.first_page_table:
				mem->zfree ((unsigned)mem->arch.directory)
				mem->zfree ((unsigned)mem->arch.shadow)
				mem->arch.directory = NULL
				mem->arch.shadow = NULL
			return false
		t = alloc_page_table (mem)
		if !t:
			mem->zfree ((unsigned)table)
			if !mem->arch.first_page_table:
				mem->zfree ((unsigned)mem->arch.directory)
				mem->zfree ((unsigned)mem->arch.shadow)
				mem->arch.directory = NULL
				mem->arch.shadow = NULL
			return false
		mem->arch.directory[address >> 21] = table
		mem->arch.shadow[address >> 21] = t
	arch_page *p = alloc_page (mem, t)
	if !p:
		if !t->first_page:
			// This automatically cleans up the rest.
			free_page_table (t, address >> 21)
		return false
	unsigned idx = (address >> 12) & ((1 << 9) - 1)
	if table[idx]:
		mem->unmap ((Page *)table[idx + 0x200], address)
	table[idx] = make_entry_lo (page, write)
	table[idx + 0x200] = (unsigned)p
	p->mapping = address + write
	p->page = page
	p->next_mapped = page->arch.first_mapped
	if p->next_mapped:
		p->next_mapped->prev_mapped = p
	page->arch.first_mapped = p
	return true

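// Remove a single mapping: clear the page table entry and free the arch_page
// record, which also drops the leaf table once it becomes empty.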
void Memory_arch_unmap (Memory *mem, Page *page, unsigned address):
	unsigned didx = address >> 21
	unsigned tidx = (address >> 12) & ((1 << 9) - 1)
	unsigned *table = mem->arch.directory[didx]
	arch_page_table *t = mem->arch.shadow[didx]
	table[tidx] = 0
	arch_page *p = (arch_page *)table[tidx + 0x200]
	table[tidx + 0x200] = 0
	free_page (t, p)

Page *Memory_arch_get_mapping (Memory *mem, unsigned address, bool *writable):
	if address >= 0x80000000:
		return NULL
	unsigned *table = mem->arch.directory[address >> 21]
	unsigned idx = (address >> 12) & ((1 << 9) - 1)
	arch_page *page = (arch_page *)table[idx + 0x200]
	if writable:
		*writable = table[idx] & 4
	return page->page

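// The frame or flags of a page have changed: recompute its EntryLo value and
// push it into every page table entry and live TLB entry that maps the page.
// Read-only mappings get the value with the dirty bit cleared.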
void Page_arch_update_mapping (Page *page):
	if !page->arch.first_mapped:
		return
	Memory *as = page->address_space
	unsigned target = make_entry_lo (page, page->data.flags & PAGE_FLAG_WRITABLE)
	for arch_page *p = page->arch.first_mapped; p; p = p->next_mapped:
		unsigned de = p->mapping >> 21
		unsigned te = (p->mapping >> 12) & ((1 << 9) - 1)
		bool write = p->mapping & 1
		unsigned t
		if write:
			t = target
		else:
			t = target & ~0x4
		as->arch.directory[de][te] = t
		tlb_reset (p->mapping & ~1, as->arch.asid, t)

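// Capability invocation entry point.  The calling thread passes the target
// capability in v0, four capability arguments in a0-a3 and four data words
// in t0-t3; v0 receives the result.  If the invocation switched threads,
// give the new address space a valid asid (asids[] doubles as a free list;
// when it is exhausted, an asid is reused round-robin and its stale TLB
// entries are flushed) and set up the status register (privileged threads
// get the coprocessor 0 usable bit).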
void arch_invoke ():
	Capability *target, *c[4]
	bool wait, copy[4]
	Thread *caller = current
	target = caller->address_space->find_capability (current->arch.v0, &wait)
	if !target:
		// TODO: this should do nothing; the debug output is only here because the rest doesn't work yet.
		dbg_led (caller->arch.a0, caller->arch.a1, caller->arch.a2)
		dbg_sleep (1000)
		schedule ()
		// Calling an invalid capability always fails.
		caller->arch.v0 = 0
	else:
		if wait:
			caller->wait ()
		c[0] = caller->address_space->find_capability (caller->arch.a0, &copy[0])
		c[1] = caller->address_space->find_capability (caller->arch.a1, &copy[1])
		c[2] = caller->address_space->find_capability (caller->arch.a2, &copy[2])
		c[3] = caller->address_space->find_capability (caller->arch.a3, &copy[3])
		unsigned d[4]
		d[0] = caller->arch.t0
		d[1] = caller->arch.t1
		d[2] = caller->arch.t2
		d[3] = caller->arch.t3
		caller->arch.v0 = target->invoke (d, c, copy) ? 1 : 0
	if caller != current:
		if (Memory *)asids[current->address_space->arch.asid] != current->address_space:
			if asids[0]:
				current->address_space->arch.asid = asids[0]
				asids[0] = asids[asids[0]]
			else:
				static unsigned random = 1
				current->address_space->arch.asid = random
				// We are reusing an asid that may still be live, so flush its entries from the tlb.
				flush_tlb (random)
				++random
				if random >= 64:
					random = 1
			asids[current->address_space->arch.asid] = (unsigned)current->address_space
		cp0_set (CP0_ENTRY_HI, current->address_space->arch.asid)
		directory = current->address_space->arch.directory
		unsigned status
		cp0_get (CP0_STATUS, status)
		status &= 0x0fffffff
		if current->flags & THREAD_FLAG_PRIV:
			status |= 0x10000000
		cp0_set (CP0_STATUS, status | 0x13)

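// Attach a receiver to a hardware interrupt and enable or disable the
// corresponding interrupt mask bit (IM0-IM7, bits 8-15) in the status
// register.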
void arch_register_interrupt (unsigned num, Receiver *r):
	arch_interrupt_receiver[num] = r
	unsigned status
	cp0_get (CP0_STATUS, status)
	// And enable or disable the interrupt.
	if r:
		status |= 1 << (num + 8)
	else:
		status &= ~(1 << (num + 8))
	cp0_set (CP0_STATUS, status)