mirror of git://projects.qi-hardware.com/iris.git
Bas Wijnen 2009-05-24 12:22:22 +02:00
parent f800bc51be
commit 12320ef8df
7 changed files with 133 additions and 63 deletions

alloc.ccp

@@ -1,14 +1,17 @@
#pypp 0
#include "kernel.hh"
// TODO: avoid recursion.
bool Memory::use ():
if used >= limit:
return false
if !parent || parent->use ():
++used
return true
return false
// Go up to parents, incrementing used.
Memory *m
for m = this; m; m = m->parent:
if m->used >= m->limit:
// Not allowed. Restore used for all children.
for Memory *r = this; r != m; r = r->parent:
--r->used
return false
++m->used
return true
void Memory::unuse ():
--used
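
The rewritten Memory::use above replaces the recursive parent walk with an explicit loop: every ancestor is charged in turn, and if any ancestor is already at its limit the charges made so far are rolled back, so the operation is all-or-nothing. A minimal sketch of the same pattern in plain C++ (the Account type and its fields are illustrative stand-ins for Memory's used/limit bookkeeping):

struct Account {
	Account *parent;
	unsigned used, limit;

	// Charge one unit to this account and every ancestor, or charge nothing.
	bool use () {
		for (Account *a = this; a; a = a->parent) {
			if (a->used >= a->limit) {
				// Some ancestor is full: undo the charges made so far.
				for (Account *r = this; r != a; r = r->parent)
					--r->used;
				return false;
			}
			++a->used;
		}
		return true;
	}
};
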
@@ -129,21 +132,28 @@ void Object_base::free_obj (Memory *parent):
Page *Memory::alloc_page ():
Page *ret = (Page *)search_free (sizeof (Page), (void **)&pages)
if !ret:
return NULL
ret->physical = 0
return ret
Thread *Memory::alloc_thread ():
Thread *ret = (Thread *)search_free (sizeof (Thread), (void **)&threads)
if !ret:
return NULL
ret->address_space = this
ret->pc = 0
ret->sp = 0
Thread_arch_init (ret)
ret->schedule_prev = NULL
ret->schedule_next = NULL
ret->receivers = NULL
return ret
Message *Memory::alloc_message (Capability *source):
Message *ret = (Message *)search_free (sizeof (Message), (void **)&source->target->messages)
if !ret:
return NULL
for unsigned i = 0; i < 4; ++i:
ret->capabilities[i] = NULL
ret->data[i] = 0
@@ -152,6 +162,8 @@ Message *Memory::alloc_message (Capability *source):
Receiver *Memory::alloc_receiver ():
Receiver *ret = (Receiver *)search_free (sizeof (Receiver), (void **)&receivers)
if !ret:
return NULL
ret->owner = NULL
ret->prev_owned = NULL
ret->next_owned = NULL
@@ -161,6 +173,8 @@ Receiver *Memory::alloc_receiver ():
Capability *Memory::alloc_capability (Receiver *target, Capability **parent, unsigned protected_data):
Capability *ret = (Capability *)search_free (sizeof (Capability), (void **)&capabilities)
if !ret:
return NULL
ret->target = target
ret->children = NULL
ret->sibling_prev = NULL
@@ -170,8 +184,20 @@ Capability *Memory::alloc_capability (Receiver *target, Capability **parent, uns
ret->protected_data = protected_data
return ret
Cappage *Memory::alloc_cappage ():
Cappage *ret = (Cappage *)search_free (sizeof (Cappage), (void **)&cappages)
if !ret:
return NULL
ret->page = (Capability *)zalloc ()
if !ret->page:
free_cappage (ret)
return NULL
return ret
Memory *Memory::alloc_memory ():
Memory *ret = (Memory *)search_free (sizeof (Memory), (void **)&memories)
if !ret:
return NULL
ret->parent = this
ret->frees = NULL
ret->pages = NULL
@@ -190,7 +216,8 @@ void Memory::free_page (Page *page):
if page->next:
page->next->prev = page->prev
unuse ()
pfree (page->physical)
if page->physical:
pfree (page->physical)
page->free_obj (this)
void Memory::free_thread (Thread *thread):
@@ -207,6 +234,8 @@ void Memory::free_thread (Thread *thread):
first_scheduled = thread->schedule_next
if thread->schedule_next:
thread->schedule_next->schedule_prev = thread->schedule_prev
while thread->receivers:
thread->receivers->orphan ()
thread->free_obj (this)
void Memory::free_message (Message *message):
@@ -215,45 +244,39 @@ void Memory::free_message (Message *message):
message->free_obj (this)
void Memory::free_receiver (Receiver *receiver):
if receiver->prev_owned:
receiver->prev_owned->next_owned = receiver->next_owned
else:
receiver->owner->receivers = receiver->next_owned
if receiver->next_owned:
receiver->next_owned->prev_owned = receiver->prev_owned
receiver->orphan ()
while receiver->capabilities:
receiver->capabilities->invalidate ()
while receiver->messages:
free_message (receiver->messages)
receiver->free_obj (this)
void Memory::free_capability (Capability *capability):
if capability->sibling_prev:
capability->sibling_prev->sibling_next = capability->sibling_next
void Receiver::orphan ():
if prev_owned:
prev_owned->next_owned = next_owned
else:
capability->target->capabilities = capability->sibling_next
if capability->sibling_next:
capability->sibling_next->sibling_prev = capability->sibling_prev
// The sibling_prev link is used here to point to the parent.
// This method is used to avoid recursion.
capability->sibling_prev = NULL
Capability *c = capability
while c->children:
c->children->sibling_prev = c
c = c->children
while c:
Capability *next = c->sibling_next
if !next:
next = c->sibling_prev
if next:
next->sibling_prev = c->sibling_prev
c->free_obj (this)
c = next
owner->receivers = next_owned
if next_owned:
next_owned->prev_owned = prev_owned
owner = NULL
void Receiver::own (Thread *o):
if owner:
orphan ()
owner = o
next_owned = o->receivers
if next_owned:
next_owned->prev_owned = this
o->receivers = this
void Memory::free_capability (Capability *capability):
capability->invalidate ()
capability->free_obj (this)
void Capability::invalidate ():
if sibling_prev:
sibling_prev->sibling_next = sibling_next
else:
else if target:
target->capabilities = sibling_next
if sibling_next:
sibling_next->sibling_prev = sibling_prev
@@ -277,6 +300,12 @@ void Capability::invalidate ():
c->protected_data = 0
c = next
void Memory::free_cappage (Cappage *p):
for unsigned i = 0; i < CAPPAGE_SIZE; ++i:
p->page[i].invalidate ()
zfree ((unsigned)p->page)
p->free_obj (this)
void Memory::free_memory (Memory *mem):
if mem->prev:
mem->prev->next = mem->next
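
Receiver::orphan and Receiver::own above are plain intrusive doubly-linked-list operations on a thread's list of owned receivers: orphan unlinks the receiver from its current owner, own re-links it at the head of the new owner's list. A self-contained sketch of the same pattern in standard C++ (Node and Owner are hypothetical names; unlike the kernel code, the sketch also resets prev explicitly when re-linking):

struct Owner;

struct Node {
	Owner *owner = nullptr;
	Node *prev = nullptr, *next = nullptr;
	void orphan ();
	void own (Owner *o);
};

struct Owner {
	Node *nodes = nullptr;   // head of the intrusive list
};

void Node::orphan () {
	if (prev)
		prev->next = next;
	else
		owner->nodes = next;   // we were the head of the owner's list
	if (next)
		next->prev = prev;
	owner = nullptr;
}

void Node::own (Owner *o) {
	if (owner)
		orphan ();             // leave the old owner's list first
	owner = o;
	prev = nullptr;
	next = o->nodes;           // link in at the head
	if (next)
		next->prev = this;
	o->nodes = this;
}
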


@@ -47,6 +47,7 @@ extern "C" {
#define CAP_CAPABILITY_GET 1
#define CAP_CAPABILITY_SET_DEATH_NOTIFY 2
#define CAPPAGE_SIZE 113
#define CAP_CAPPAGE_SET 1
#define CAP_CAPPAGE_GET 2


@@ -118,7 +118,8 @@ static void init_threads ():
panic (0x22446688, "unable to map initial page")
else:
for unsigned p = (shdr->sh_addr & PAGE_MASK); p <= ((shdr->sh_addr + shdr->sh_size - 1) & PAGE_MASK); p += PAGE_SIZE:
Page *page = mem->get_mapping (p)
bool write = false
Page *page = mem->get_mapping (p, &write)
if !page:
page = mem->alloc_page ()
if !page:
@@ -127,6 +128,8 @@ static void init_threads ():
if !page->physical || !mem->map (page, p, true):
panic (0x33557799, "unable to map initial bss page")
else:
if !write:
panic (0x20203030, "bss section starts on read-only page")
for unsigned a = p; a < p + PAGE_SIZE; a += 4:
if a >= shdr->sh_addr + shdr->sh_size:
break
@@ -134,11 +137,10 @@ static void init_threads ():
continue
((unsigned *)page->physical)[(a & ~PAGE_MASK) >> 2] = 0
for unsigned p = 0; p <= ((thread_start[i + 1] - thread_start[i] - 1) >> PAGE_BITS); ++p:
// TODO: this also skips pages where new space is allocated.
if pages[p]:
continue
++top_memory.limit
top_memory.zfree (thread_start[i] + (p << PAGE_BITS))
top_memory.pfree (thread_start[i] + (p << PAGE_BITS))
Page *stackpage = mem->alloc_page ()
stackpage->physical = mem->zalloc ()
if !stackpage || !mem->map (stackpage, 0x7ffff000, true):
@@ -191,7 +193,11 @@ void init ():
top_memory.used = 0
top_memory.arch.directory = NULL
top_memory.arch.asid = 0
for unsigned i = 0; i < 63; ++i:
asids[i] = i + 1
asids[63] = 0
init_threads ()
// Enable all interrupts and say we're handling an exception.


@@ -7,17 +7,18 @@ Thread *tlb_refill ():
//panic (0x88776655, "TLB refill")
unsigned EntryHi
cp0_get (CP0_ENTRY_HI, EntryHi)
Page *page0 = current->address_space->get_mapping (EntryHi & ~(1 << 12))
Page *page1 = current->address_space->get_mapping (EntryHi | (1 << 12))
bool write0 = false, write1 = false
Page *page0 = current->address_space->get_mapping (EntryHi & ~(1 << 12), &write0)
Page *page1 = current->address_space->get_mapping (EntryHi | (1 << 12), &write1)
if (!(EntryHi & (1 << 12)) && !page0) || ((EntryHi & (1 << 12)) && !page1):
panic (0x22222222, "no page mapped at requested address")
unsigned low0, low1
if page0:
low0 = (unsigned)page0->physical | 0x18 | 0x4 | 0x2
low0 = ((page0->physical & ~0x80000fff) >> 6) | 0x18 | (write0 ? 0x4 : 0) | 0x2
else:
low0 = 0
if page1:
low1 = (unsigned)page1->physical | 0x18 | 0x4 | 0x2
low1 = ((page1->physical & ~0x80000fff) >> 6) | 0x18 | (write1 ? 0x4 : 0) | 0x2
else:
low1 = 0
cp0_set (CP0_ENTRY_LO0, low0)
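
The two EntryLo assignments above pack a MIPS32 TLB entry: bits 6..29 hold the page frame number, bits 3..5 the cache coherency attribute (0x18 selects value 3, cacheable noncoherent), bit 2 is the dirty bit (which doubles as write permission and is now set only when the mapping is writable), and bit 1 is the valid bit. The mask ~0x80000fff strips the KSEG0 bit and the 4 KiB page offset before the frame number is shifted into place. A hedged sketch of the same encoding as a helper (make_entry_lo is a hypothetical name, not a kernel function):

// Sketch of the EntryLo encoding used above (MIPS32, 4 KiB pages).
static inline unsigned make_entry_lo (unsigned physical, bool writable) {
	unsigned pfn = (physical & ~0x80000fffu) >> 6;   // drop KSEG0 bit and page offset, PFN into bits 6..29
	unsigned c = 3u << 3;                            // cache coherency: cacheable, noncoherent
	unsigned d = writable ? 1u << 2 : 0u;            // dirty bit = write enable
	unsigned v = 1u << 1;                            // valid
	return pfn | c | d | v;                          // global bit (bit 0) left clear
}

With such a helper the assignments above would read low0 = page0 ? make_entry_lo (page0->physical, write0) : 0, and likewise for low1.
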
@@ -87,7 +88,7 @@ Thread *exception ():
panic (0x91223344, "Breakpoint.")
case 10:
// Reserved instruction.
panic (*(unsigned *)0x004000b0)
panic ((*(unsigned *)0x004000b0) + 1)
panic (0xa1223344, "Reserved instruction.")
case 11:
// Coprocessor unusable.


@@ -59,6 +59,8 @@ struct Receiver : public Object <Receiver>:
Receiver *prev_owned, *next_owned
Capability *capabilities
Message *messages
void own (Thread *o)
void orphan ()
struct Capability : public Object <Capability>:
Receiver *target
@@ -68,6 +70,9 @@ struct Capability : public Object <Capability>:
void invoke (unsigned d0, unsigned d1, unsigned d2, unsigned d3, Capability *c0, Capability *c1, Capability *c2, Capability *c3)
void invalidate ()
struct Cappage : public Object <Cappage>:
Capability *page
struct Memory : public Object <Memory>:
Memory *parent
Free *frees
@@ -81,7 +86,7 @@ struct Memory : public Object <Memory>:
inline bool map (Page *page, unsigned address, bool write)
inline void unmap (Page *page, unsigned address)
inline Page *get_mapping (unsigned address)
inline Page *get_mapping (unsigned address, bool *writable)
// Allocation of pages.
bool use ()
@@ -137,7 +142,7 @@ void Memory_arch_init (Memory *mem)
void Memory_arch_free (Memory *mem)
bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write)
void Memory_arch_unmap (Memory *mem, Page *page, unsigned address)
Page *Memory_arch_get_mapping (Memory *mem, unsigned address)
Page *Memory_arch_get_mapping (Memory *mem, unsigned address, bool *writable)
void arch_invoke ()
void arch_schedule (Thread *previous, Thread *target)
@@ -145,7 +150,7 @@ bool Memory::map (Page *page, unsigned address, bool write):
return Memory_arch_map (this, page, address, write)
void Memory::unmap (Page *page, unsigned address):
Memory_arch_unmap (this, page, address)
Page *Memory::get_mapping (unsigned address):
return Memory_arch_get_mapping (this, address)
Page *Memory::get_mapping (unsigned address, bool *writable):
return Memory_arch_get_mapping (this, address, writable)
#endif


@@ -29,12 +29,20 @@ void Thread_arch_init (Thread *thread):
thread->arch.k1 = 0
void Memory_arch_init (Memory *mem):
++g_asid
if g_asid > 0x3f:
g_asid = 1
mem->arch.asid = g_asid
mem->arch.asid = 1
mem->arch.directory = NULL
static void flush_tlb (unsigned asid):
for unsigned tlb = 1; tlb < 32; ++tlb:
cp0_set (CP0_INDEX, tlb)
__asm__ volatile ("tlbr")
unsigned hi
cp0_get (CP0_ENTRY_HI, hi)
if (hi & 0x1f) == asid:
// Set asid to 0, which is only used by the idle task.
cp0_set (CP0_ENTRY_HI, 0x2000 * tlb)
__asm__ volatile ("tlbwi")
void Memory_arch_free (Memory *mem):
if !mem->arch.directory:
return
@@ -50,6 +58,10 @@ void Memory_arch_free (Memory *mem):
mem->unuse ()
mem->zfree ((unsigned)table)
mem->arch.directory[i] = NULL
if (Memory *)asids[mem->arch.asid] == mem:
flush_tlb (mem->arch.asid)
asids[mem->arch.asid] = asids[0]
asids[0] = mem->arch.asid
mem->unuse ()
mem->zfree ((unsigned)mem->arch.directory)
@@ -58,25 +70,27 @@ bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write):
mem->arch.directory = (unsigned **)mem->zalloc ()
if !mem->arch.directory:
return false
unsigned *table = mem->arch.directory[(unsigned)address >> 22]
unsigned *table = mem->arch.directory[address >> 22]
if !table:
table = (unsigned *)mem->zalloc ()
if !table:
return false
mem->arch.directory[(unsigned)address >> 22] = table
unsigned idx = ((unsigned)address >> 12) & ((1 << 10) - 1)
mem->arch.directory[address >> 22] = table
unsigned idx = (address >> 12) & ((1 << 10) - 1)
if table[idx]:
mem->unmap ((Page *)(table[idx] & ~3), address)
table[idx] = write ? (unsigned)page : (unsigned)page + 1
return true
void Memory_arch_unmap (Memory *mem, Page *page, unsigned address):
unsigned *table = mem->arch.directory[(unsigned)address >> 22]
table[((unsigned)address >> 12) & ((1 << 10) - 1)] = 0
unsigned *table = mem->arch.directory[address >> 22]
table[(address >> 12) & ((1 << 10) - 1)] = 0
Page *Memory_arch_get_mapping (Memory *mem, unsigned address):
unsigned *table = mem->arch.directory[(unsigned)address >> 22]
unsigned v = table[((unsigned)address >> 12) & ((1 << 10) - 1)]
Page *Memory_arch_get_mapping (Memory *mem, unsigned address, bool *writable):
unsigned *table = mem->arch.directory[address >> 22]
unsigned v = table[(address >> 12) & ((1 << 10) - 1)]
if writable:
*writable = !(v & 1)
return (Page *)(v & ~1)
void arch_invoke ():
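
Memory_arch_map, Memory_arch_unmap and Memory_arch_get_mapping above all decompose a virtual address the same way: the top 10 bits index the per-address-space directory, the next 10 bits index a 1024-entry page table, the low 12 bits are the page offset, and bit 0 of a table entry marks the mapping read-only. A sketch of that lookup in standard C++ (the kernel stores plain unsigned words on its 32-bit target; uintptr_t and the null-table guard are sketch-side choices):

#include <cstdint>

struct Page;

static Page *lookup (uintptr_t **directory, uint32_t address, bool *writable) {
	uintptr_t *table = directory[address >> 22];       // top 10 bits: directory slot
	if (!table)
		return nullptr;                             // sketch-only guard for unmapped regions
	uintptr_t v = table[(address >> 12) & 0x3ff];       // next 10 bits: page-table slot
	if (writable)
		*writable = !(v & 1);                       // bit 0 set marks the mapping read-only
	return (Page *)(v & ~(uintptr_t)1);                 // remaining bits are the Page pointer
}
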
@@ -95,5 +109,17 @@ void arch_invoke ():
target->invoke (current->arch.t0, current->arch.t1, current->arch.t2, current->arch.t3, c0, c1, c2, c3)
void arch_schedule (Thread *previous, Thread *target):
if (Memory *)asids[target->address_space->arch.asid] != target->address_space:
if asids[0]:
target->address_space->arch.asid = asids[0]
asids[0] = asids[asids[0]]
else:
static unsigned random = 1
target->address_space->arch.asid = random
// Overwrite used asid, so flush those values from tlb.
flush_tlb (random)
++random
if random >= 64:
random = 1
asids[target->address_space->arch.asid] = (unsigned)target
cp0_set (CP0_ENTRY_HI, target->address_space->arch.asid)
// TODO: flush TLB if the asid is already taken.
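
Together with the asids[] initialisation in init () and the recycling in Memory_arch_free above, arch_schedule maintains a small free list over the 64 MIPS ASIDs: slot i holds the owning Memory while asid i is assigned, otherwise the index of the next free asid, with asids[0] acting as the list head (asid 0 itself stays reserved for the idle task). A simplified sketch of the scheme in standard C++ (init_asids, alloc_asid and release_asid are hypothetical helpers; the kernel does this inline, and its array is plain unsigned on the 32-bit target):

#include <cstdint>

struct Memory;

// Slot i: owning Memory (as an integer) while asid i is taken, otherwise the
// index of the next free asid; slot 0 is the head of the free list.  uintptr_t
// keeps the sketch valid on 64-bit hosts as well.
static uintptr_t asids[64];

// Stub standing in for the TLB flush shown above, which rewrites every TLB
// entry that still carries the recycled asid.
static void flush_tlb (unsigned) {}

static void init_asids () {
	for (unsigned i = 0; i < 63; ++i)
		asids[i] = i + 1;          // chain the asids into a free list
	asids[63] = 0;                     // 0 terminates the list
}

static unsigned alloc_asid (Memory *owner) {
	unsigned a = (unsigned)asids[0];
	if (a) {
		asids[0] = asids[a];       // pop the free-list head
	} else {
		// No free asid left: recycle one round-robin and flush its entries.
		static unsigned victim = 1;
		a = victim;
		flush_tlb (a);
		if (++victim >= 64)
			victim = 1;
	}
	asids[a] = (uintptr_t)owner;       // slot now records the owning address space
	return a;
}

static void release_asid (unsigned a) {
	flush_tlb (a);
	asids[a] = asids[0];               // push back onto the free list
	asids[0] = a;
}

When no free asid remains, a currently assigned one is recycled and its stale TLB entries flushed, mirroring the random counter path in arch_schedule above.
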


@@ -58,7 +58,9 @@ struct Memory_arch:
unsigned asid
unsigned **directory
EXTERN unsigned g_asid
// Each entry holds a pointer to the owning Memory while the asid is taken, or the index of the next free asid (0 terminates the list).
// asids[0] is the index of the first free asid.
EXTERN unsigned asids[64]
// Functions which can be called from assembly must not be mangled.
extern "C":