#pypp 0
// Iris: micro-kernel for a capability-based operating system.
// alloc.ccp: Allocation of kernel structures.
// Copyright 2009 Bas Wijnen <wijnen@debian.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

#include "kernel.hh"

// Memory model used for kernel structure storage.
// Each Memory object has several pointers, one for each type of object it contains.  These pointers are the heads of doubly-linked lists.
// Each object also has a NEXT and a PREV pointer, which point to the next and previous object in the same page.  These pointers are 0 for the first (or last) object in a page.  There is no pointer to the first object; it always starts at page_base + SIZE.
// The PREV/NEXT list contains all objects in the page, including kFree objects, in the order they appear in the page.
// The prev/next lists contain only objects of one type, unsorted.
// All pointers point to the start of the object.  There is a header of size SIZE before it, containing NEXT and PREV.

#define PREV(x) (((kObject **)(x))[-2])
#define NEXT(x) (((kObject **)(x))[-1])
#define SIZE (2 * sizeof (kObject *))

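// Layout illustration (addresses hypothetical, assuming 32-bit pointers so
// SIZE == 8): a page at 0x80000000 holding one object A followed by a kFree F:
//
//	0x80000000: PREV (A) == 0, NEXT (A) == &F    (header of A)
//	0x80000008: A                                (list pointers point here)
//	...
//	&F - SIZE:  PREV (F) == &A, NEXT (F) == 0    (header of F)
//	&F:         F                                (runs to the end of the page)
//
// PREV (x) and NEXT (x) are the two words directly below x.
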
bool kMemory::use (unsigned num):
	// Go up through the parents, incrementing used.
	for kMemory *m = this; m; m = m->address_space:
		if m->used + num > m->limit:
			// Not allowed.  Restore used for all children.
			for kMemory *r = this; r != m; r = r->address_space:
				r->used -= num
			return false
		m->used += num
	return true

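// For example (hypothetical numbers): if a memory has limit 10 with used 8,
// and its parent has limit 100 with used 99, then use (2) increments the
// child's used to 10, finds 99 + 2 > 100 at the parent, rolls the child back
// to 8 and returns false; the accounting is unchanged on failure.
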
void kMemory::unuse (unsigned num):
	for kMemory *m = this; m; m = m->address_space:
		m->used -= num

// This allocates a new block of memory for use by the kernel.
// size is the required size of the block (excluding SIZE).
// first is a pointer to the first object pointer of this type.
// The result is a block of at least size bytes, which is linked as an object into the list of first.
void *kMemory::search_free (unsigned size, void **first):
	kFree *f
	unsigned s = 0
	// Let's see if there already is a kFree chunk which is large enough.
	for f = frees; f; f = (kFree *)f->next:
		if NEXT (f):
			s = (unsigned)NEXT (f) - (unsigned)f
		else:
			// The last object in the page runs up to the end of the page.
			s = PAGE_SIZE - ((unsigned)f & ~PAGE_MASK) + SIZE
		// s is now the size of the current free block, including SIZE.
		// The requirement is to fit a block of size, plus its SIZE header.
		if s >= size + SIZE:
			break
	if !f:
		// No chunk was found; allocate a new page and create a chunk in it.  It is always large enough.
		unsigned p = palloc ()
		if !p:
			dbg_log ("no free space: kernel allocation failed")
			return NULL
		f = (kFree *)(p + SIZE)
		// Mark it as a kFree object.
		f->marker = ~0
		// Link it into the kFree list.
		f->next = frees
		f->prev = NULL
		frees = f
		if f->next:
			((kFree *)f->next)->prev = f
		// There are no other objects in this page.
		NEXT (f) = NULL
		PREV (f) = NULL
		// The size of this block is the entire page.
		s = PAGE_SIZE
	// We have a free block, possibly too large.  The block is linked in frees, and in the page.
	if s >= size + sizeof (kFree) + 2 * SIZE:
		// The block is large enough to split: create the new object at the end and keep the kFree.
		// f is the start of the free block.
		// f + (s - SIZE) is the end of the free block, compensated for the header of the next block.
		// f + (s - SIZE) - size is the address where the new block should start.
		kFree *obj = (kFree *)((unsigned)f + (s - SIZE) - size)
		// Link the new object into the page.
		NEXT (obj) = NEXT (f)
		if NEXT (obj):
			PREV (NEXT (obj)) = obj
		PREV (obj) = f
		NEXT (f) = obj
		// Set f to the new object, because it is used by that name later.
		f = obj
	else:
		// The block was only just large enough: turn it into the new type.  It is already linked into the page.
		// Unlink it from the free list.
		if f->prev:
			((kFree *)f->prev)->next = f->next
		else:
			frees = (kFree *)f->next
		if f->next:
			((kFree *)f->next)->prev = f->prev
	// f is now a block which is linked in the page, but not in any list.  Link it into first.
	f->next = (kFree *)*first
	f->prev = NULL
	if f->next:
		((kFree *)f->next)->prev = f
	*first = f
	// Set common initial values.
	f->address_space = this
	f->refs.reset ()
	return f

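// A worked example of the split arithmetic (hypothetical numbers, assuming
// 32-bit pointers so SIZE == 8, and PAGE_SIZE == 0x1000): a fresh page at
// 0x80000000 yields f == 0x80000008 with s == 0x1000.  A request of
// size == 0x18 satisfies s >= size + sizeof (kFree) + 2 * SIZE, so the block
// is split: obj == f + (0x1000 - 8) - 0x18 == 0x80000FE8, its SIZE header
// occupies 0x80000FE0..0x80000FE7, the new object runs exactly to the end of
// the page at 0x80001000, and the kFree at 0x80000008 keeps the rest.
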
// Free an object.  On entry it is still linked in its type list and in the page list.
void kMemory::free_obj (kObject *obj, kPointer *first):
	kFree *self = (kFree *)obj
	// Invalidate references.
	while self->refs.valid ():
		self->refs->invalidate ()
	// Unlink it from its type list.
	if self->prev:
		((kFree *)self->prev)->next = self->next
	else:
		*(kPointer *)first = (kPointer)self->next
	if self->next:
		((kFree *)self->next)->prev = self->prev
	// Merge with the previous object, if it exists and is a kFree.
	if PREV (self) && PREV (self)->is_free ():
		self = (kFree *)PREV (self)
		// Remove the freed object from the page list.
		NEXT (self) = NEXT (obj)
		if NEXT (self):
			PREV (NEXT (self)) = self
	else:
		// The previous object is not a kFree, so the freed object becomes a new one.
		// It is already linked in the page, but needs to be linked into the free list.
		self->next = frees
		self->prev = NULL
		if self->next:
			((kFree *)self->next)->prev = self
		frees = self
		// Mark it as a kFree.
		self->marker = ~0
	// Merge with the next object, if it exists and is a kFree.
	if NEXT (self) && NEXT (self)->is_free ():
		// Unlink the next from the frees list.
		kFree *n = (kFree *)NEXT (self)
		if n->prev:
			((kFree *)n->prev)->next = n->next
		else:
			frees = (kFree *)n->next
		if n->next:
			((kFree *)n->next)->prev = n->prev
		// Unlink the next from the page list.
		NEXT (self) = NEXT (NEXT (self))
		if NEXT (self):
			PREV (NEXT (self)) = self
	// Free the page if the resulting kFree is the only object in it.
	if !PREV (self) && !NEXT (self):
		if self->next:
			((kFree *)self->next)->prev = self->prev
		if self->prev:
			((kFree *)self->prev)->next = self->next
		else:
			frees = (kFree *)self->next
		pfree ((unsigned)self - SIZE)

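// The merge logic above distinguishes four situations (sketched with
// hypothetical page contents; F = a kFree, X = the freed object):
//	[... F X ...]  ->  X is absorbed into the preceding F.
//	[... A X ...]  ->  X itself becomes a new kFree after the object A.
//	[... X F ...]  ->  the following F is absorbed into the result.
//	[    X     ]   ->  the resulting kFree covers the whole page, which is
//	                   returned with pfree.
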
kPage *kMemory::alloc_page ():
	kPage *ret = (kPage *)search_free (sizeof (kPage), (void **)&pages)
	if !ret:
		return NULL
	ret->frame = 0
	ret->flags = 0
	return ret

kThread *kMemory::alloc_thread (unsigned size):
	// The caps array at the end of kThread has one slot built in; allocate room for the rest.
	kThread *ret = (kThread *)search_free (sizeof (kThread) + (size - 1) * sizeof (kCapsP), (void **)&threads)
	if !ret:
		return NULL
	ret->receivers = NULL
	ret->pc = 0
	ret->sp = 0
	kThread_arch_init (ret)
	ret->flags = 0
	ret->schedule_prev = NULL
	ret->schedule_next = NULL
	ret->slots = size
	for unsigned i = 0; i < size; ++i:
		ret->caps[i] = NULL
	return ret

kMessage *kMemory::alloc_message (kReceiver *target):
	// A kMessage has room for one capability built in; allocate one more, for caps.size == 2.
	kMessage *ret = (kMessage *)search_free (sizeof (kMessage) + sizeof (kCapability), (void **)&target->messages)
	if !ret:
		return NULL
	ret->caps.size = 2
	// search_free prepends to the list, so if there is no next, this is also the last message.
	if !ret->next:
		target->last_message = ret
	return ret

kReceiver *kMemory::alloc_receiver ():
	kReceiver *ret = (kReceiver *)search_free (sizeof (kReceiver), (void **)&receivers)
	if !ret:
		return NULL
	ret->owner = NULL
	ret->prev_owned = NULL
	ret->next_owned = NULL
	ret->alarm_count = ~0
	ret->caps = NULL
	ret->capabilities.reset ()
	ret->messages = NULL
	ret->last_message = NULL
	ret->reply_protected_data = ~0
	ret->protected_only = false
	ret->queue_limit = ~0
	return ret

kCaps *kMemory::alloc_caps (unsigned size):
	// A kCaps has one capability built in; allocate room for the rest.
	kCaps *ret = (kCaps *)search_free (sizeof (kCaps) + (size - 1) * sizeof (kCapability), (void **)&capses)
	if !ret:
		return NULL
	ret->size = size
	for unsigned i = 0; i < size; ++i:
		ret->set (i, NULL, 0, kCapRef (), NULL)
	return ret

kMemory *kMemory::alloc_memory ():
	kMemory *ret = (kMemory *)search_free (sizeof (kMemory), (void **)&memories)
	if !ret:
		return NULL
	ret->frees = NULL
	ret->pages = NULL
	ret->threads = NULL
	ret->capses = NULL
	ret->receivers = NULL
	ret->memories = NULL
	ret->limit = ~0
	ret->used = 0
	kMemory_arch_init (ret)
	return ret

void kCaps::set (unsigned index, kReceiver *target, Kernel::Num pdata, kCapRef parent, kCapRef *parent_ptr):
	caps[index].target = target
	caps[index].protected_data = pdata
	caps[index].parent = parent
	caps[index].children.reset ()
	caps[index].sibling_prev.reset ()
	if parent.valid ():
		// Insert the new capability at the head of the parent's children.
		caps[index].sibling_next = parent->children
		parent->children = kCapRef (this, index)
	else:
		if parent_ptr:
			// There is no parent capability; link into the given root list instead.
			caps[index].sibling_next = *parent_ptr
			*parent_ptr = kCapRef (this, index)
		else:
			caps[index].sibling_next.reset ()
	if caps[index].sibling_next.valid ():
		caps[index].sibling_next->sibling_prev = kCapRef (this, index)

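// The result is a tree of derived capabilities: each capability records its
// parent, a head pointer to its children, and sits in a doubly-linked
// sibling list.  For instance (hypothetical), after deriving B and then C
// from A, A->children points at C, C's sibling_next at B, and both B and C
// have parent A; invalidating A can therefore reach the whole subtree.
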
void kCaps::clone (unsigned index, kCapRef source, bool copy):
	cap (index)->invalidate ()
	if !source.valid ():
		return
	if copy:
		// A copy becomes a sibling of the source: it gets the same parent.
		if source->parent.valid ():
			set (index, source->target, source->protected_data, source->parent)
		else if (unsigned)source->target & ~KERNEL_MASK:
			// The source is a root capability for a receiver; link into its capabilities list.
			set (index, source->target, source->protected_data, kCapRef (), &source->target->capabilities)
		else:
			// The source is a root capability for a kernel object; link into its refs list.
			set (index, source->target, source->protected_data, kCapRef (), &((kObject *)source->protected_data.l)->refs)
	else:
		// Not a copy: the clone becomes a child of the source.
		set (index, source->target, source->protected_data, source)

void kMemory::free_page (kPage *page):
	if page->flags & Kernel::Page::PAYING:
		unuse ()
	if page->frame:
		pfree (page->frame)
	free_obj (page, (kPointer *)&pages)

void kMemory::free_thread (kThread *thread):
	thread->unrun ()
	// Orphan all receivers owned by this thread.
	while thread->receivers:
		thread->receivers->orphan ()
	free_obj (thread, (void **)&threads)

void kMemory::free_message (kReceiver *owner, kMessage *message):
	// If this was the last message, the previous one (if any) becomes the last.
	if !message->next:
		owner->last_message = (kMessageP)message->prev
	free_obj (message, (void **)&owner->messages)

void kMemory::free_receiver (kReceiver *receiver):
	receiver->orphan ()
	while receiver->capabilities.valid ():
		receiver->capabilities->invalidate ()
	while receiver->messages:
		free_message (receiver, receiver->messages)
	free_obj (receiver, (void **)&receivers)

void kReceiver::orphan ():
	// Nothing to do if the receiver is not owned (it may already have been orphaned).
	if !owner:
		return
	if prev_owned:
		prev_owned->next_owned = next_owned
	else:
		owner->receivers = next_owned
	if next_owned:
		next_owned->prev_owned = prev_owned
	owner = NULL

void kReceiver::own (kThread *o):
	if owner:
		orphan ()
	owner = o
	// Link at the head of the new owner's list.
	prev_owned = NULL
	next_owned = o->receivers
	if next_owned:
		next_owned->prev_owned = this
	o->receivers = this

void kCapability::invalidate ():
	if !target:
		return
	// Unlink from the sibling list, or from the root list if there is no previous sibling.
	if sibling_prev.valid ():
		sibling_prev->sibling_next = sibling_next
	else if (unsigned)target & ~KERNEL_MASK:
		// The target is a receiver; this capability headed its capabilities list.
		target->capabilities = sibling_next
	else:
		// The target is a kernel object; this capability headed its refs list.
		((kObject *)protected_data.l)->refs = sibling_next
	if sibling_next.valid ():
		sibling_next->sibling_prev = sibling_prev
	parent.reset ()
	sibling_prev.reset ()
	sibling_next.reset ()
	// Invalidate all derived capabilities as well.
	kCapability *c = this
	while c:
		// Descend to a leaf.
		while c->children.valid ():
			c = c->children.deref ()
		// The next capability to handle is a sibling if there is one, else the parent.
		kCapability *next = c->sibling_next.deref ()
		if !next:
			next = c->parent.deref ()
		c->target = NULL
		c->parent.reset ()
		c->children.reset ()
		c->sibling_prev.reset ()
		c->sibling_next.reset ()
		c->protected_data = 0
		c = next

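// The loop above tears down the entire subtree iteratively rather than
// recursively: it descends to a leaf, remembers the successor (the next
// sibling if there is one, otherwise the parent) before wiping the node,
// then continues from that successor.  Keeping the traversal state in a
// single pointer avoids unbounded recursion on the kernel stack.
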
void kMemory::free_caps (kCaps *c):
	for unsigned i = 0; i < c->size; ++i:
		c->caps[i].invalidate ()
	free_obj (c, (void **)&capses)

void kMemory::free_memory (kMemory *mem):
	while mem->pages:
		free_page (mem->pages)
	while mem->capses:
		free_caps (mem->capses)
	while mem->threads:
		free_thread (mem->threads)
	while mem->memories:
		free_memory (mem->memories)
	while mem->receivers:
		free_receiver (mem->receivers)
	kMemory_arch_free (mem)
	// All objects are gone, so every page must have been returned: no kFree blocks can remain.
	if mem->frees:
		panic (0, "kernel memory leak: memory still in use")
	free_obj (mem, (void **)&memories)

void kPage::forget ():
	if share_prev || share_next:
		// The frame is shared; unlink this page from the share list.
		if share_prev:
			share_prev->share_next = share_next
		if share_next:
			share_next->share_prev = share_prev
		share_prev = NULL
		share_next = NULL
	else:
		// This was the only user.  If the page has a frame which should be freed
		// (FRAME is set and PHYSICAL is not), free it.
		if !((flags ^ Kernel::Page::FRAME) & (Kernel::Page::PHYSICAL | Kernel::Page::FRAME)):
			raw_pfree (frame)
	frame = 0
	flags &= ~(Kernel::Page::FRAME | Kernel::Page::SHARED | Kernel::Page::PHYSICAL | Kernel::Page::UNCACHED)
	kPage_arch_update_mapping (this)