mirror of
git://projects.qi-hardware.com/iris.git
synced 2024-11-05 11:31:35 +02:00
333 lines
8.0 KiB
COBOL
333 lines
8.0 KiB
COBOL
#pypp 0
// Memory management for the iris kernel: physical page allocation,
// in-page object allocation (Free regions), and alloc/free helpers
// for every kernel object type.
#include "kernel.hh"

// Every object allocated by search_free is preceded by a two-word
// header holding the addresses of the previous and the next object
// in the same page (NULL at the page boundaries).
#define PREV(x) (((Object_base **)(x))[-2])
#define NEXT(x) (((Object_base **)(x))[-1])
// Size of that per-object header, in bytes.
#define SIZE (2 * sizeof (unsigned))
|
|
|
|
bool Memory::use ():
|
|
// Go up to parents, incrementing used.
|
|
for Memory *m = this; m; m = m->address_space:
|
|
if used >= limit:
|
|
// Not allowed. Restore used for all children.
|
|
for Memory *r = this; r != m; r = r->address_space:
|
|
--r->used
|
|
return false
|
|
++m->used
|
|
return true
|
|
|
|
void Memory::unuse ():
|
|
for Memory *m = this; m; m = m->address_space:
|
|
--m->used
|
|
|
|
unsigned Memory::palloc ():
	// Allocate one physical page whose contents are unspecified
	// ("junk").  Returns the page address, or NULL when the quota
	// check fails.
	if !use ():
		return NULL
	// Prefer a junk page; only fall back to a pre-zeroed one.
	FreePage *ret = junk_pages
	if !ret:
		ret = zero_pages
		// NOTE(review): if zero_pages is also NULL this dereferences a
		// NULL pointer — presumably callers guarantee that free pages
		// exist; confirm, or a NULL check (plus unuse ()) is needed here.
		zero_pages = ret->next
	else:
		junk_pages = ret->next
	return (unsigned)ret
|
|
|
|
unsigned Memory::zalloc ():
	// Allocate one zero-filled physical page.  Returns the page
	// address, or NULL when the quota check fails.
	if !use ():
		return NULL
	FreePage *ret = zero_pages
	if !ret:
		// No pre-zeroed page: take a junk page and clear it.
		ret = junk_pages
		// Clear words 1..end only; word 0 is the `next` link, which is
		// still needed just below and is cleared by the final
		// `ret->next = NULL`.
		for unsigned i = 1; i < (PAGE_SIZE >> 2); ++i:
			((unsigned *)ret)[i] = 0
		// NOTE(review): if junk_pages is also NULL this dereferences a
		// NULL pointer — confirm callers guarantee free pages exist.
		junk_pages = ret->next
	else:
		zero_pages = ret->next
	// Clear the link word so the entire page is zero for the caller.
	ret->next = NULL
	return (unsigned)ret
|
|
|
|
void Memory::pfree (unsigned page):
|
|
FreePage *p = (FreePage *)page
|
|
p->next = junk_pages
|
|
junk_pages = p
|
|
|
|
void Memory::zfree (unsigned page):
|
|
FreePage *p = (FreePage *)page
|
|
p->next = zero_pages
|
|
zero_pages = p
|
|
|
|
void *Memory::search_free (unsigned size, void **first):
	// Find (or create, by allocating a page) a free region of at least
	// `size` payload bytes, carve an object out of it, and link that
	// object at the head of the list *first.  Returns the object, or
	// NULL when no page could be allocated.
	Free *f
	unsigned s = 0
	// First-fit scan: a region must hold the payload plus the SIZE
	// header that precedes every object.
	for f = frees; f; f = f->next:
		if NEXT (f):
			// Region size is the distance to the next object in the page.
			s = (unsigned)NEXT (f) - (unsigned)f
		else:
			// Last object in its page: region runs to the end of the page.
			s = PAGE_SIZE - ((unsigned)f & ~PAGE_MASK) + SIZE
		if s >= size + SIZE:
			break
	if !f:
		// Nothing fits: make a fresh page into a single Free region.
		unsigned p = palloc ()
		if !p:
			return NULL
		// The Free lives just past the page's object header.
		f = (Free *)(p + SIZE)
		f->marker = ~0
		f->next = frees
		f->prev = NULL
		frees = f
		if f->next:
			f->next->prev = f
		NEXT (f) = NULL
		PREV (f) = NULL
		s = PAGE_SIZE
	// We have a free block, possibly too large.
	if s >= size + sizeof (Free) + SIZE:
		// Splitting leaves room for a usable Free: create the new object
		// at the end of the region and keep the (shrunken) Free in place.
		Free *obj = (Free *)((unsigned)f + s - size - SIZE)
		NEXT (obj) = NEXT (f)
		if NEXT (obj):
			PREV (NEXT (obj)) = obj
		PREV (obj) = f
		NEXT (f) = obj
		f = obj
	else:
		// Consume the whole region: unlink it from the free list.
		if f->prev:
			f->prev->next = f->next
		else:
			frees = f->next
		if f->next:
			f->next->prev = f->prev
	// Turn the region into an object owned by this Memory and push it
	// onto the caller's list.
	f->address_space = this
	f->refs = NULL
	f->next = (Free *)*first
	f->prev = NULL
	if f->next:
		f->next->prev = f
	*first = f
	return f
|
|
|
|
void Memory::free_obj (Object_base *obj):
	// Return an object's storage to the free pool, coalescing with
	// adjacent Free regions and releasing the page once it is empty.
	Free *self
	// Merge with previous, if it exists and is a Free.
	if PREV (obj) && PREV (obj)->is_free ():
		self = (Free *)PREV (obj)
		NEXT (self) = NEXT (obj)
		if NEXT (obj):
			PREV (NEXT (obj)) = self
	else:
		// No merge possible: turn the object itself into a Free and
		// link it into the free list.
		self = (Free *)obj
		self->next = frees
		self->prev = NULL
		if self->next:
			self->next->prev = self
		frees = self
		self->marker = ~0
	// Merge with next, if it exists and is a Free.
	// NOTE(review): the absorbed Free is not unlinked from the frees
	// list here — confirm it cannot remain reachable through `frees`.
	if NEXT (self) && NEXT (self)->is_free ():
		NEXT (self) = NEXT (NEXT (self))
		if NEXT (self):
			PREV (NEXT (self)) = self
	// Free page if the resulting object is the only thing in it.
	if !PREV (self) && !NEXT (self):
		if self->next:
			self->next->prev = self->prev
		if self->prev:
			self->prev->next = self->next
		else:
			frees = self->next
		// Bug fix: the original passed (unsigned)(self - SIZE), which is
		// pointer arithmetic in units of sizeof (Free), not bytes, so a
		// garbage address reached pfree.  The page begins SIZE bytes
		// (the PREV/NEXT header) before the Free itself.
		pfree ((unsigned)self - SIZE)
|
|
|
|
Page *Memory::alloc_page ():
	// Carve a Page object out of this Memory and put it on the pages
	// list.  Returns NULL when allocation fails.
	void *mem = search_free (sizeof (Page), (void **)&pages)
	if !mem:
		return NULL
	Page *result = (Page *)mem
	// No physical backing yet; the caller maps one in later.
	result->physical = 0
	return result
|
|
|
|
Thread *Memory::alloc_thread ():
	// Allocate a Thread in this address space with cleared state; the
	// caller sets pc/sp and schedules it afterwards.
	Thread *t = (Thread *)search_free (sizeof (Thread), (void **)&threads)
	if !t:
		return NULL
	t->address_space = this
	t->pc = 0
	t->sp = 0
	// Architecture-specific register state.
	Thread_arch_init (t)
	t->flags = 0
	// Not on the run queue and owning no receivers yet.
	t->schedule_prev = NULL
	t->schedule_next = NULL
	t->receivers = NULL
	return t
|
|
|
|
Message *Memory::alloc_message (Capability *source):
|
|
Message *ret = (Message *)search_free (sizeof (Message), (void **)&source->target->messages)
|
|
if !ret:
|
|
return NULL
|
|
for unsigned i = 0; i < 4; ++i:
|
|
ret->capabilities[i] = NULL
|
|
ret->data[i] = 0
|
|
ret->protected_data = source->protected_data
|
|
return ret
|
|
|
|
Receiver *Memory::alloc_receiver ():
	// Allocate a Receiver with no owner, no capabilities pointing at
	// it, and an empty message queue.
	Receiver *r = (Receiver *)search_free (sizeof (Receiver), (void **)&receivers)
	if !r:
		return NULL
	r->owner = NULL
	r->next_owned = NULL
	r->prev_owned = NULL
	r->messages = NULL
	r->capabilities = NULL
	return r
|
|
|
|
Capability *Memory::alloc_capability (Receiver *target, Capability *parent, Capability **parent_ptr, unsigned protected_data, Capability *ret):
	// Create a capability for `target` (reusing `ret` when the caller
	// already has storage), inserting it at the head of the sibling
	// list *parent_ptr.  Returns NULL when allocation fails.
	if !ret:
		ret = (Capability *)search_free (sizeof (Capability), (void **)&capabilities)
		if !ret:
			return NULL
	ret->target = target
	ret->parent = parent
	ret->children = NULL
	ret->sibling_prev = NULL
	ret->sibling_next = parent_ptr ? *parent_ptr : NULL
	if ret->sibling_next:
		ret->sibling_next->sibling_prev = ret
	// Bug fix: the original never stored the new capability back into
	// *parent_ptr, so the list head still pointed at the old first
	// element while that element's sibling_prev pointed out of the list.
	if parent_ptr:
		*parent_ptr = ret
	ret->protected_data = protected_data
	return ret
|
|
|
|
Capability *Memory::clone_capability (Capability *source, bool copy, Capability *ret):
	// copy == true: duplicate `source` as a sibling (same parent, same
	// rights); copy == false: derive a child capability of `source`.
	if copy:
		// A sibling joins its parent's child list, or the receiver's
		// root list when source has no parent.
		Capability **head = source->parent ? &source->parent->children : &source->target->capabilities
		return alloc_capability (source->target, source->parent, head, source->protected_data, ret)
	return alloc_capability (source->target, source, &source->children, source->protected_data, ret)
|
|
|
|
Cappage *Memory::alloc_cappage ():
	// Allocate a Cappage together with the zeroed physical page that
	// backs its capability array; either both succeed or neither.
	Cappage *cp = (Cappage *)search_free (sizeof (Cappage), (void **)&cappages)
	if !cp:
		return NULL
	cp->page = (Capability *)zalloc ()
	if cp->page:
		return cp
	// Backing page unavailable: give the Cappage object back.
	free_cappage (cp)
	return NULL
|
|
|
|
Memory *Memory::alloc_memory ():
	// Allocate a child Memory with empty object lists, an unlimited
	// quota and zero usage.
	Memory *ret = (Memory *)search_free (sizeof (Memory), (void **)&memories)
	if !ret:
		return NULL
	ret->frees = NULL
	ret->pages = NULL
	ret->threads = NULL
	ret->memories = NULL
	// Bug fix: these list heads are used by alloc_receiver,
	// alloc_capability and alloc_cappage but were never initialized
	// here, so a fresh Memory started with garbage list pointers.
	ret->receivers = NULL
	ret->capabilities = NULL
	ret->cappages = NULL
	// NOTE(review): junk_pages/zero_pages are not cleared here —
	// confirm they are globals rather than per-Memory members.
	ret->limit = ~0
	ret->used = 0
	Memory_arch_init (ret)
	return ret
|
|
|
|
void Memory::free_page (Page *page):
	// Unlink a Page from the pages list, release its quota charge and
	// its physical backing (when mapped), then reclaim the object.
	Page *before = page->prev
	Page *after = page->next
	if before:
		before->next = after
	else:
		pages = after
	if after:
		after->prev = before
	unuse ()
	if page->physical:
		pfree (page->physical)
	free_obj (page)
|
|
|
|
void Memory::free_thread (Thread *thread):
	// Remove a thread from this Memory's thread list and from the
	// scheduler queue, orphan every receiver it owns, and reclaim it.
	Thread *after = thread->next
	if thread->prev:
		thread->prev->next = after
	else:
		threads = after
	if after:
		after->prev = thread->prev
	// Unschedule.
	Thread *run_next = thread->schedule_next
	if thread->schedule_prev:
		thread->schedule_prev->schedule_next = run_next
	else if first_scheduled == thread:
		first_scheduled = run_next
	if run_next:
		run_next->schedule_prev = thread->schedule_prev
	// Receivers owned by a dying thread lose their owner.
	while thread->receivers:
		thread->receivers->orphan ()
	free_obj (thread)
|
|
|
|
void Memory::free_message (Message *message):
|
|
for unsigned i = 0; i < 4; ++i:
|
|
free_capability (message->capabilities[i])
|
|
free_obj (message)
|
|
|
|
void Memory::free_receiver (Receiver *receiver):
	// Detach the receiver from its owning thread, invalidate every
	// capability that points at it, drop all queued messages, then
	// reclaim its storage.
	// NOTE(review): orphan () writes owner->receivers when prev_owned
	// is NULL; confirm it is safe to call for a receiver that was
	// never owned (owner == NULL).
	receiver->orphan ()
	// invalidate () unlinks each capability from receiver->capabilities,
	// so this loop terminates.
	while receiver->capabilities:
		receiver->capabilities->invalidate ()
	while receiver->messages:
		free_message (receiver->messages)
	free_obj (receiver)
|
|
|
|
void Receiver::orphan ():
|
|
if prev_owned:
|
|
prev_owned->next_owned = next_owned
|
|
else:
|
|
owner->receivers = next_owned
|
|
if next_owned:
|
|
next_owned->prev_owned = prev_owned
|
|
owner = NULL
|
|
|
|
void Receiver::own (Thread *o):
	// Transfer ownership of this receiver to thread o, inserting it
	// at the head of o's receiver list.
	if owner:
		orphan ()
	owner = o
	// Bug fix: clear the stale back link.  This receiver becomes the
	// list head, but neither orphan () nor the original code reset
	// prev_owned, so a re-owned receiver kept a dangling pointer that
	// a later orphan () would follow.
	prev_owned = NULL
	next_owned = o->receivers
	if next_owned:
		next_owned->prev_owned = this
	o->receivers = this
|
|
|
|
void Memory::free_capability (Capability *capability):
	// Invalidate a capability and reclaim its storage.
	// Bug fix: tolerate NULL — free_message passes the message's four
	// capability slots unconditionally, and alloc_message initializes
	// them to NULL, so the original crashed on unused slots.
	if !capability:
		return
	capability->invalidate ()
	free_obj (capability)
|
|
|
|
void Capability::invalidate ():
	// Unlink this capability from its sibling list, then wipe it and
	// its descendants so they no longer grant access to anything.
	if sibling_prev:
		sibling_prev->sibling_next = sibling_next
	else if target:
		// NOTE(review): when this capability has a parent, the list head
		// should presumably be parent->children rather than
		// target->capabilities — confirm.
		target->capabilities = sibling_next
	if sibling_next:
		sibling_next->sibling_prev = sibling_prev
	// Iterative subtree walk: descend along first children to the
	// deepest node, then clear while moving to the next sibling, or up
	// to the parent when the sibling chain ends.
	Capability *c = this
	while c->children:
		c = c->children
	while c:
		Capability *next = c->sibling_next
		if !next:
			next = c->parent
		// NOTE(review): siblings visited here are cleared but never
		// descended into (their children are skipped), and when the walk
		// reaches `this` its own sibling_next/parent fields still point
		// into the old lists, so the walk may continue past the subtree.
		// Confirm intended termination before relying on this.
		c->target = NULL
		c->parent = NULL
		c->children = NULL
		c->sibling_prev = NULL
		c->sibling_next = NULL
		c->protected_data = 0
		c = next
|
|
|
void Memory::free_cappage (Cappage *p):
	// Invalidate every capability stored in the cappage, return the
	// backing page to the zeroed-page pool, then reclaim the object.
	// Bug fix: alloc_cappage calls this with p->page == NULL when the
	// backing page could not be allocated; the original dereferenced
	// that NULL and then pushed a NULL "page" onto the zero list.
	if p->page:
		// invalidate () clears every field, so the page is all zeroes
		// again and may go back on the zero list.
		for unsigned i = 0; i < CAPPAGE_SIZE; ++i:
			p->page[i].invalidate ()
		zfree ((unsigned)p->page)
	free_obj (p)
|
|
|
|
void Memory::free_memory (Memory *mem):
	// Unlink a child Memory from this one and destroy everything it
	// still contains before reclaiming its storage.
	if mem->prev:
		mem->prev->next = mem->next
	else:
		memories = mem->next
	if mem->next:
		mem->next->prev = mem->prev
	// Bug fix: the original called this->free_page (mem->pages) etc.;
	// those member functions unlink from and free into *this* Memory's
	// lists and pool, but the objects live in mem, so mem's lists were
	// never emptied and this's lists were corrupted.  Destroy them in
	// mem's own context instead.
	while mem->pages:
		mem->free_page (mem->pages)
	while mem->threads:
		mem->free_thread (mem->threads)
	while mem->memories:
		mem->free_memory (mem->memories)
	// NOTE(review): receivers, capabilities and cappages allocated from
	// mem are not freed here — confirm they are cleaned up elsewhere or
	// this leaks them.
	Memory_arch_free (mem)
	free_obj (mem)
|