mirror of
git://projects.qi-hardware.com/iris.git
synced 2024-11-16 18:03:08 +02:00
664 lines
20 KiB
COBOL
664 lines
20 KiB
COBOL
#pypp 0
|
|
// Iris: micro-kernel for a capability-based operating system.
|
|
// alloc.ccp: Allocation of kernel structures.
|
|
// Copyright 2009 Bas Wijnen <wijnen@debian.org>
|
|
//
|
|
// This program is free software: you can redistribute it and/or modify
|
|
// it under the terms of the GNU General Public License as published by
|
|
// the Free Software Foundation, either version 3 of the License, or
|
|
// (at your option) any later version.
|
|
//
|
|
// This program is distributed in the hope that it will be useful,
|
|
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
// GNU General Public License for more details.
|
|
//
|
|
// You should have received a copy of the GNU General Public License
|
|
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
#include "kernel.hh"
|
|
|
|
// Memory model used for kernel structure storage
|
|
// Each Memory object has several pointers, one for each type of objects it contains. These pointers are the start of double-linked lists.
|
|
// Each object also has a NEXT and PREV pointer, which point to the next and previous object in the same page. These pointers are 0 for the first (or last) object in a page. There is no pointer to the first object, it always starts at page_base + SIZE.
|
|
// The PREV/NEXT-list contains all objects in the page, including kFree objects, in the order they appear in the page.
|
|
// The prev/next-lists contain only objects of one type, unsorted.
|
|
// All pointers are to the start of the object. There is a header of size SIZE before it, containing NEXT and PREV.
|
|
|
|
#define PREV(x) (((kObject **)(x))[-2])
|
|
#define NEXT(x) (((kObject **)(x))[-1])
|
|
// ATTENTION! When changing SIZE, be sure to also change the hard-coded 8 in kernel.hhp which defines MAX_NUM_CAPS.
|
|
#define SIZE (2 * sizeof (kObject *))
|
|
|
|
bool kMemory::use (unsigned num):
|
|
// Go up to parents, incrementing used.
|
|
for kMemory *m = this; m; m = m->address_space:
|
|
if used + num > limit:
|
|
// Not allowed. Restore used for all children.
|
|
for kMemory *r = this; r != m; r = r->address_space:
|
|
r->used -= num
|
|
return false
|
|
m->used += num
|
|
return true
|
|
|
|
void kMemory::unuse (unsigned num):
|
|
for kMemory *m = this; m; m = m->address_space:
|
|
m->used -= num
|
|
|
|
// This allocates a new block of memory for use by the kernel.
|
|
// size is the required size of the block (excluding SIZE)
|
|
// first is a pointer to the first object pointer of this type.
|
|
// The result is a block of size at least size, which is linked as an object in the list of first.
|
|
void *kMemory::search_free (unsigned size, void **first):
	// Allocate a kernel structure of the given size (excluding the SIZE
	// header) and link it at the head of the type list *first.
	// Returns NULL if no page can be allocated.
	kFree *f
	if size >= PAGE_SIZE - SIZE:
		panic (size, "requested size is too large")
	int s = 0
	// Let's see if there already is a kFree chunk which is large enough.
	for f = frees; f; f = (kFree *)f->next:
		if NEXT (f):
			// Block size is the distance to the next object in the page.
			s = (unsigned)NEXT (f) - (unsigned)f
		else:
			// Last object in the page: block runs to the end of the page.
			// (addr & ~PAGE_MASK) is the offset of f within its page.
			s = PAGE_SIZE - ((unsigned)f & ~PAGE_MASK) + SIZE
		// s is now the size of the current free block, including SIZE.
		// The requirement is to fit a block of size, plus its SIZE header.
		if s >= size + SIZE:
			break
	if !f:
		// No chunk was found; allocate a new page and add a chunk in it.  It is always large enough.
		unsigned p = palloc ()
		if !p:
			kdebug ("no free space: kernel allocation failed")
			return NULL
		// First object starts after the SIZE header at the page base.
		f = (kFree *)(p + SIZE)
		// Mark it as a kFree object.
		f->marker = ~0
		// Link it in the kFree list.
		f->next = frees
		f->prev = NULL
		frees = f
		if f->next:
			((kFree *)f->next)->prev = f
		// There are no other objects in this page.
		NEXT (f) = NULL
		PREV (f) = NULL
		// The size of this block is the entire page.
		s = PAGE_SIZE
	// We have a free block, possibly too large.  The block is linked in frees, and in the page.
	if s >= size + sizeof (kFree) + 2 * SIZE:
		// Create the new object at the end and keep the Free.
		// f is the start of the free block
		// f + (s - SIZE) is the end of the free block, compensated for the header of the next block.
		// f + (s - SIZE) - size is the address where the new block should start.
		kFree *obj = (kFree *)((unsigned)f + (s - SIZE) - size)
		// Link the new object in the page.
		NEXT (obj) = NEXT (f)
		if NEXT (obj):
			PREV (NEXT (obj)) = obj
		PREV (obj) = f
		NEXT (f) = obj
		// Set f to the new object, because it is used by that name later.
		f = obj
	else:
		// The block was only just large enough: turn it into a new type.  It is already linked into the page.
		// Unlink it from the free list.
		if f->prev:
			((kFree *)f->prev)->next = f->next
		else:
			frees = (kFree *)f->next
		if f->next:
			((kFree *)f->next)->prev = f->prev
	// f is now a block which is linked in the page, but not in any list.  Link it into first.
	f->next = (kFree *)*first
	f->prev = NULL
	if f->next:
		((kFree *)f->next)->prev = f
	*first = f
	// Set common initial values.
	f->address_space = this
	f->refs.reset ()
	return f
|
|
|
|
// Free an object; it is still in its list, and it is still in the page list.
|
|
void kMemory::free_obj (kObject *obj, kPointer *first):
	// Free a kernel structure: unlink it from its type list *first, turn
	// it into a kFree block, merge with adjacent kFree neighbours in the
	// page, and release the page when it becomes empty.
	kFree *self = (kFree *)obj
	// Invalidate references.
	while self->refs.valid ():
		self->refs->invalidate ()
	// Free it from its list.
	if self->prev:
		((kFree *)self->prev)->next = self->next
	else:
		*(kPointer *)first = (kPointer)self->next
	if self->next:
		((kFree *)self->next)->prev = self->prev
	// Merge with previous, if it exists and is a kFree.
	if PREV (self) && PREV (self)->is_free ():
		self = (kFree *)PREV (self)
		// Remove the object from the page list (absorb it into self).
		NEXT (self) = NEXT (obj)
		if NEXT (self):
			PREV (NEXT (self)) = self
	else:
		// The previous object is not a kFree, so create a new one.
		// It is already linked in the page, but needs to be linked into the free list.
		self->next = frees
		self->prev = NULL
		if self->next:
			((kFree *)self->next)->prev = self
		frees = self
		// Mark it as a kFree.
		self->marker = ~0
	// Merge with next, if it exists and is a kFree.
	if NEXT (self) && NEXT (self)->is_free ():
		// Unlink the next from the frees list.
		kFree *n = (kFree *)NEXT (self)
		if n->prev:
			((kFree *)n->prev)->next = n->next
		else:
			frees = (kFree *)n->next
		if n->next:
			((kFree *)n->next)->prev = n->prev
		// Unlink the next from the page list.
		NEXT (self) = NEXT (NEXT (self))
		if NEXT (self):
			PREV (NEXT (self)) = self
	// Free page if the resulting object is the only thing in it.
	if !PREV (self) && !NEXT (self):
		// Unlink the final kFree from the frees list before dropping the page.
		if self->next:
			((kFree *)self->next)->prev = self->prev
		if self->prev:
			((kFree *)self->prev)->next = self->next
		else:
			frees = (kFree *)self->next
		//kdebug ("freeing page: ")
		//kdebug_num ((unsigned)self - SIZE)
		//kdebug ("\n")
		// self - SIZE is the page base (the first object's header starts there).
		pfree ((unsigned)self - SIZE)
|
|
|
|
kPage *kMemory::alloc_page ():
	// Allocate a new kPage in this address space.  Returns NULL when the
	// kernel is out of memory.
	kPage *page = (kPage *)search_free (sizeof (kPage), (void **)&pages)
	if !page:
		return NULL
	// Start out with no frame, no flags and no mapping, unshared.
	page->frame = 0
	page->flags = 0
	page->mapping = ~0
	page->share_next = NULL
	page->share_prev = NULL
	kPage_arch_init (page)
	return page
|
|
|
|
kThread *kMemory::alloc_thread (unsigned size):
|
|
kThread *ret = (kThread *)search_free (sizeof (kThread) + (size - 1) * sizeof (kThread::caps_store), (void **)&threads)
|
|
if !ret:
|
|
return NULL
|
|
ret->receivers = NULL
|
|
ret->pc = 0
|
|
ret->sp = 0
|
|
kThread_arch_init (ret)
|
|
ret->flags = 0
|
|
ret->id = ~0
|
|
ret->schedule_prev = NULL
|
|
ret->schedule_next = NULL
|
|
ret->slots = size
|
|
for unsigned i = 0; i < size; ++i:
|
|
ret->slot[i].prev.thread = NULL
|
|
ret->slot[i].next.thread = NULL
|
|
ret->slot[i].caps = NULL
|
|
//kdebug ("new thread: ")
|
|
//kdebug_num ((unsigned)ret)
|
|
//kdebug ("\n")
|
|
return ret
|
|
|
|
void kCaps::init (unsigned s):
|
|
first_slot.thread = NULL
|
|
size = s
|
|
for unsigned i = 0; i < s; ++i:
|
|
set (i, NULL, 0, kCapRef (), NULL)
|
|
|
|
kCaps *kMemory::alloc_caps (unsigned size):
	// Allocate a capability array with size entries in this address
	// space.  Returns NULL on invalid size or out of memory.
	if size == 0:
		dpanic (0, "zero-size caps")
		return NULL
	if size > MAX_NUM_CAPS:
		dpanic (size, "requested caps is too large")
		return NULL
	// The first kCapability is part of kCaps itself.
	unsigned bytes = sizeof (kCaps) + (size - 1) * sizeof (kCapability)
	kCaps *c = (kCaps *)search_free (bytes, (void **)&capses)
	if !c:
		return NULL
	c->init (size)
	return c
|
|
|
|
kMessage *kMemory::alloc_message (kReceiver *target):
	// Allocate a kMessage at the head of target's message queue.
	// The extra kCapability gives the message room for two capabilities.
	kMessage *msg = (kMessage *)search_free (sizeof (kMessage) + sizeof (kCapability), (void **)&target->messages)
	if !msg:
		return NULL
	msg->caps.init (2)
	// search_free links at the head; with no successor this message is
	// also the tail of the queue.
	if !msg->next:
		target->last_message = msg
	return msg
|
|
|
|
kList *kMemory::alloc_list ():
	// Allocate an empty kList in this address space; NULL on failure.
	kList *list = (kList *)search_free (sizeof (kList), (void **)&lists)
	if !list:
		return NULL
	// One owner capability, initially invalid; no items yet.
	list->owner.init (1)
	list->first_listitem = NULL
	return list
|
|
|
|
kListitem *kMemory::alloc_listitem ():
	// Allocate a kListitem in this address space; NULL on failure.
	kListitem *item = (kListitem *)search_free (sizeof (kListitem), (void **)&listitems)
	if !item:
		return NULL
	// One target capability, initially invalid; not a member of any list.
	item->target.init (1)
	item->list = NULL
	item->next_item = NULL
	item->prev_item = NULL
	item->info = 0
	return item
|
|
|
|
kReceiver *kMemory::alloc_receiver ():
	// Allocate a kReceiver in this address space; NULL on failure.
	kReceiver *rcv = (kReceiver *)search_free (sizeof (kReceiver), (void **)&receivers)
	if !rcv:
		return NULL
	// Not owned by any thread.
	rcv->owner = NULL
	rcv->next_owned = NULL
	rcv->prev_owned = NULL
	// No alarm pending, no capabilities, no queued messages.
	rcv->alarm_count = ~0
	rcv->caps = NULL
	rcv->capabilities.reset ()
	rcv->messages = NULL
	rcv->last_message = NULL
	rcv->reply_protected_data = ~0
	rcv->protected_only = false
	rcv->queue_limit = ~0
	return rcv
|
|
|
|
kMemory *kMemory::alloc_memory ():
	// Allocate a child kMemory in this address space; NULL on failure.
	kMemory *mem = (kMemory *)search_free (sizeof (kMemory), (void **)&memories)
	if !mem:
		return NULL
	// The new space contains no objects of any type yet.
	mem->frees = NULL
	mem->pages = NULL
	mem->threads = NULL
	mem->capses = NULL
	mem->receivers = NULL
	mem->memories = NULL
	// Unlimited quota, nothing used yet.
	mem->limit = ~0
	mem->used = 0
	kMemory_arch_init (mem)
	return mem
|
|
|
|
void kCaps::set (unsigned index, kReceiver *target, Iris::Num pdata, kCapRef parent, kCapRef *parent_ptr):
	// Fill capability slot index with (target, pdata) and link it into
	// the derivation tree: under parent when one is given, else at the
	// head of the root list *parent_ptr, else fully detached.
	if index >= size:
		kdebug ("size: ")
		kdebug_num (size)
		kdebug ("\n")
		dpanic (index, "index too large for kCaps")
		return
	kCapability *c = &caps[index]
	c->target = target
	c->protected_data = pdata
	c->parent = parent
	// A freshly set capability has no children and no predecessor yet.
	c->children.reset ()
	c->sibling_prev.reset ()
	if parent.valid ():
		// Insert at the head of the parent's child list.
		c->sibling_next = parent->children
		parent->children = kCapRef (this, index)
	else:
		if parent_ptr:
			// No parent capability: insert at the head of the given root list.
			c->sibling_next = *parent_ptr
			*parent_ptr = kCapRef (this, index)
		else:
			// Orphan capability: not a member of any sibling list.
			c->sibling_next.reset ()
	// Back-link the former head, whichever list we joined.
	if c->sibling_next.valid ():
		c->sibling_next->sibling_prev = kCapRef (this, index)
|
|
|
|
void kCaps::clone (unsigned index, kCapRef source, bool copy):
	// Store a clone of source in slot index.  With copy set, the clone
	// becomes a sibling of source (same parent); otherwise it becomes a
	// child of source.
	cap (index)->invalidate ()
	if !source.valid ():
		return
	if copy:
		if source->parent.valid ():
			// Sibling copy: attach under source's own parent.
			set (index, source->target, source->protected_data, source->parent)
		else if (unsigned)source->target & ~KERNEL_MASK:
			// Root capability on a real receiver: attach to the
			// receiver's root capability list.
			set (index, source->target, source->protected_data, kCapRef (), &source->target->capabilities)
		else:
			// Kernel capability: the object is encoded in protected_data;
			// attach to that object's reference list.
			set (index, source->target, source->protected_data, kCapRef (), &((kObject *)source->protected_data.l)->refs)
	else:
		// Derived capability: source becomes the parent.
		set (index, source->target, source->protected_data, source)
|
|
|
|
void kMemory::free_page (kPage *page):
	// Destroy a kPage: unmap it, detach it from any share chain (freeing
	// the frame if nobody pays for it any more), refund the quota it
	// paid, and release the structure itself.
	if page->mapping != ~0:
		page->address_space->unmap (page)
	page->forget ()
	if page->flags & Iris::Page::PAYING:
		// The page paid quota for exactly one page; release it
		// explicitly instead of relying on a default argument.
		unuse (1)
	free_obj (page, (kPointer *)&pages)
|
|
|
|
void kThread::unset_slot (unsigned s):
	// Detach slot s from the kCaps it references, unlinking it from the
	// doubly linked chain of slots that share that kCaps.
	if !slot[s].caps:
		// Slot is already empty.
		return
	if slot[s].prev.thread:
		slot[s].prev.thread->slot[slot[s].prev.index].next = slot[s].next
	else:
		// This slot was the head of the kCaps' slot chain.
		slot[s].caps->first_slot = slot[s].next
	if slot[s].next.thread:
		slot[s].next.thread->slot[slot[s].next.index].prev = slot[s].prev
	// Clear the slot itself.
	slot[s].prev.thread = NULL
	slot[s].next.thread = NULL
	slot[s].caps = NULL
|
|
|
|
void kMemory::free_thread (kThread *thread):
|
|
thread->unrun ()
|
|
while thread->receivers:
|
|
thread->receivers->orphan ()
|
|
for unsigned i = 0; i < thread->slots; ++i:
|
|
thread->unset_slot (i)
|
|
free_obj (thread, (void **)&threads)
|
|
if old_current == thread:
|
|
old_current = NULL
|
|
|
|
void kMemory::free_message (kReceiver *owner, kMessage *message):
|
|
for unsigned i = 0; i < 2; ++i:
|
|
message->caps.cap (i)->invalidate ()
|
|
if !message->next:
|
|
owner->last_message = (kMessageP)message->prev
|
|
free_obj (message, (void **)&owner->messages)
|
|
|
|
void kMemory::free_receiver (kReceiver *receiver):
	// Destroy a kReceiver: detach it from its owner, invalidate every
	// capability pointing at it, drain its message queue and free it.
	receiver->orphan ()
	while receiver->capabilities.valid ():
		receiver->capabilities->invalidate ()
	while receiver->messages:
		free_message (receiver, receiver->messages)
	free_obj (receiver, (void **)&receivers)
|
|
|
|
void kReceiver::orphan ():
	// Detach this receiver from its owning thread's receiver list.
	if prev_owned:
		prev_owned->next_owned = next_owned
	else if owner:
		// Head of the owner's list: move the list head forward.
		owner->receivers = next_owned
	if next_owned:
		next_owned->prev_owned = prev_owned
	owner = NULL
|
|
|
|
void kReceiver::own (kThread *o):
	// Make thread o the owner of this receiver, detaching it from any
	// previous owner first.
	if owner:
		orphan ()
	// Push onto the front of o's receiver list.
	owner = o
	// The list head has no predecessor; clear any stale prev_owned left
	// over from a previous membership (orphan() does not reset it).
	prev_owned = NULL
	next_owned = o->receivers
	if next_owned:
		next_owned->prev_owned = this
	o->receivers = this
|
|
|
|
void kCapability::invalidate ():
	// Revoke this capability and, recursively, every capability derived
	// from it.  The capability is unlinked from its sibling list first,
	// then the whole subtree below it is cleared iteratively.
	if !target:
		// Already invalid.
		return
	//kdebug_num ((unsigned)this)
	//kdebug ("\n")
	//kdebug_num ((unsigned)target)
	//kdebug (":")
	//kdebug_num ((unsigned)protected_data.l)
	//kdebug ("\n")
	// Debug aid: trap when a capability being watched is invalidated.
	if (unsigned)this == dbg_code.h:
		dpanic (0, "invalidating watched capability")
	// Unlink from whichever list holds this capability: the sibling
	// chain, the parent's child list, the receiver's root list, or (for
	// kernel capabilities) the object's reference list.
	if sibling_prev.valid ():
		sibling_prev->sibling_next = sibling_next
	else if parent.valid ():
		parent->children = sibling_next
	else if (unsigned)target & ~KERNEL_MASK:
		target->capabilities = sibling_next
	else:
		((kObject *)protected_data.l)->refs = sibling_next
	if sibling_next.valid ():
		sibling_next->sibling_prev = sibling_prev
	parent.reset ()
	sibling_prev.reset ()
	sibling_next.reset ()
	// Iteratively invalidate the subtree rooted here without recursion:
	// always descend to a leaf first, clear it, then move to its sibling
	// (or back up to the parent when there is none).
	kCapability *c = this
	while c:
		while c->children.valid ():
			c = c->children.deref ()
		// Pick the next capability to visit before destroying c.
		kCapability *next = c->sibling_next.deref ()
		if !next:
			next = c->parent.deref ()
		c->target = NULL
		if c->parent.valid ():
			c->parent->children = c->sibling_next
		c->parent.reset ()
		c->children.reset ()
		c->sibling_prev.reset ()
		c->sibling_next.reset ()
		c->protected_data = 0
		c = next
|
|
|
|
void kMemory::free_caps (kCaps *c):
|
|
//kdebug ("free caps ")
|
|
//kdebug_num ((unsigned)c)
|
|
//kdebug ('\n')
|
|
for unsigned i = 0; i < c->size; ++i:
|
|
c->cap (i)->invalidate ()
|
|
while c->first_slot.thread:
|
|
c->first_slot.thread->unset_slot (c->first_slot.index)
|
|
free_obj (c, (void **)&capses)
|
|
|
|
void kListitem::add (kList *l):
	// Move this item to list l (NULL means: remove from any list).
	// The previous list's owner is notified of the removal.
	// Remove item from its current list, if any.
	if list:
		if prev_item:
			prev_item->next_item = next_item
		else:
			list->first_listitem = next_item
		if next_item:
			next_item->prev_item = prev_item
		// Notify list owner.
		if list->owner.cap (0):
			kCapability::Context context
			// data[0]: whether the old list still has items; data[1]: this item's info.
			context.data[0] = list->first_listitem != NULL
			context.data[1] = info
			list->owner.cap (0)->invoke (&context)
		// Don't leak info to new owner.
		info = 0
	// Link into the new list (at the head), or leave detached when l is NULL.
	list = l
	prev_item = NULL
	if !l:
		next_item = NULL
		return
	next_item = l->first_listitem
	l->first_listitem = this
	if next_item:
		next_item->prev_item = this
|
|
|
|
void kMemory::free_listitem (kListitem *i):
	// Destroy a kListitem: drop its target capability, detach it from
	// its list (add (NULL) also notifies the list owner), and free it.
	i->target.cap (0)->invalidate ()
	i->add (NULL)
	free_obj (i, (void **)&listitems)
|
|
|
|
void kMemory::free_list (kList *l):
	// Destroy a kList: drop the owner callback, detach every remaining
	// item, then free the structure.
	l->owner.cap (0)->invalidate ()
	// add (NULL) pops the item off the list head, so loop until empty.
	while l->first_listitem:
		l->first_listitem->add (NULL)
	free_obj (l, (void **)&lists)
|
|
|
|
void kMemory::free_memory (kMemory *mem):
	// Destroy a child address space: recursively release every object it
	// contains, then free the kMemory structure itself.  Each free_*
	// call pops the head of its list, so the loops run until empty.
	while mem->pages:
		mem->free_page (mem->pages)
	while mem->capses:
		mem->free_caps (mem->capses)
	while mem->threads:
		mem->free_thread (mem->threads)
	while mem->memories:
		mem->free_memory (mem->memories)
	while mem->receivers:
		mem->free_receiver (mem->receivers)
	while mem->lists:
		mem->free_list (mem->lists)
	while mem->listitems:
		mem->free_listitem (mem->listitems)
	// With all objects gone, no kFree blocks may remain either.
	if mem->frees:
		panic (0, "kernel memory leak: memory still in use")
	free_obj (mem, (void **)&memories)
|
|
|
|
void kPage::check_payment ():
|
|
kPage *p
|
|
for p = this; p; p = p->share_prev:
|
|
if p->flags & Iris::Page::PAYING:
|
|
return
|
|
for p = share_next; p; p = p->share_next:
|
|
if p->flags & Iris::Page::PAYING:
|
|
return
|
|
// No kPage is paying for this frame anymore.
|
|
raw_pfree (frame)
|
|
kPage *next
|
|
for p = share_prev, next = (p ? p->share_prev : NULL); p; p = next, next = p->share_prev:
|
|
p->frame = NULL
|
|
p->share_prev = NULL
|
|
p->share_next = NULL
|
|
p->flags &= ~(Iris::Page::SHARED | Iris::Page::FRAME)
|
|
kPage_arch_update_mapping (p)
|
|
for p = this, next = p->share_next; p; p = next, next = p->share_next:
|
|
p->frame = NULL
|
|
p->share_prev = NULL
|
|
p->share_next = NULL
|
|
p->flags &= ~(Iris::Page::SHARED | Iris::Page::FRAME)
|
|
kPage_arch_update_mapping (p)
|
|
|
|
void kPage::forget ():
	// Drop this page's frame: leave the share chain (letting the rest of
	// the chain decide whether the frame survives), or free the frame
	// directly when the page is its sole user.
	if share_prev || share_next:
		// Unlink from the share chain.
		if share_prev:
			share_prev->share_next = share_next
		if share_next:
			share_next->share_prev = share_prev
			share_next->check_payment ()
		else:
			share_prev->check_payment ()
		share_prev = NULL
		share_next = NULL
	else:
		// If the page has a frame and should be freed, free it.
		// The XOR test means: FRAME is set and PHYSICAL is clear
		// (physical pages are not owned, so never freed here).
		if !((flags ^ Iris::Page::FRAME) & (Iris::Page::PHYSICAL | Iris::Page::FRAME)):
			raw_pfree (frame)
	// In all cases the page itself no longer references a frame.
	frame = 0
	flags &= ~(Iris::Page::FRAME | Iris::Page::SHARED | Iris::Page::PHYSICAL | Iris::Page::UNCACHED)
	kPage_arch_update_mapping (this)
|
|
|
|
static void check_receiver (kReceiver *r, kCapRef cap, unsigned line):
	// Consistency check: verify that cap (and recursively all its
	// children) really refer to receiver r.  line is reported on panic.
	if (unsigned)cap->target & ~KERNEL_MASK:
		// Normal capability: target must be the receiver itself.
		if cap->target != r:
			dpanic (line, "consistency bug in capabilities")
	else:
		// Kernel capability: the receiver is encoded in protected_data.
		if cap->protected_data.l != (unsigned)r:
			kdebug ("Buggy: receiver=")
			kdebug_num ((unsigned)r)
			kdebug ("; caps=")
			kdebug_num ((unsigned)cap.caps)
			kdebug ("; caps mem=")
			kdebug_num ((unsigned)cap.caps->address_space)
			kdebug ("; cap=")
			kdebug_num ((unsigned)cap.deref ())
			kdebug ("; cap target=")
			kdebug_num ((unsigned)cap->target)
			kdebug ("; protected=")
			kdebug_num (cap->protected_data.l)
			kdebug ("!= receiver\n")
			dpanic (line, "consistency bug in kernel capabilities")
	// Children must be exact derivations: same target and protected data.
	for kCapRef c = cap->children; c.valid (); c = c->sibling_next:
		if c->protected_data.value () != cap->protected_data.value () || c->target != cap->target:
			dpanic (line, "capability db bug")
		check_receiver (r, c, line)
|
|
|
|
void kReceiver::check (unsigned line):
|
|
for kCapRef cap = capabilities; cap.valid (); cap = cap->sibling_next:
|
|
check_receiver (this, cap, line)
|
|
|
|
void kMemory::check (unsigned line):
|
|
for kReceiver *r = receivers; r; r = (kReceiver *)r->next:
|
|
r->check (line)
|
|
for kThread *t = threads; t; t = (kThread *)t->next:
|
|
if t->flags & Iris::Thread::RUNNING && t->pc == 0:
|
|
kdebug_num ((unsigned)t)
|
|
kdebug ("\n")
|
|
panic (line, "pc is 0")
|
|
for kMemory *m = memories; m; m = (kMemory *)m->next:
|
|
m->check (line)
|
|
|
|
static void print_obj (kObject *o):
	// Debug helper: print an object list as "addr->addr->...->NULL\n".
	// (The original declared an unused local alias of o; removed.)
	for ; o; o = (kObject *)o->next:
		kdebug_num ((unsigned)o)
		kdebug ("->")
	kdebug ("NULL\n")
|
|
|
|
void kMemory::print (unsigned line, unsigned indent):
	// Debug dump of this address space and, recursively, its children.
	// indent is the current nesting depth in tabs.
	if indent == 0:
		print_free ()
	for unsigned i = 0; i < indent; ++i:
		kdebug ('\t')
	// Children (and the sections below) are indented one level deeper.
	++indent
	kdebug ("Memory ")
	kdebug_num ((unsigned)this)
	kdebug ("\n")
	for unsigned i = 0; i < indent; ++i:
		kdebug ('\t')
	kdebug ("frees: ")
	for kFree *f = frees; f; f = (kFree *)f->next:
		kdebug_num ((unsigned)f)
		kdebug (":")
		// Block size = distance to the next object in the page (0 for the last).
		unsigned n = (unsigned)NEXT (f)
		if n:
			n -= (unsigned)f
			if n >= PAGE_SIZE:
				dpanic (0, "invalid kFree")
		kdebug_num (n, 3)
		kdebug ("->")
	kdebug ("NULL\n")
	for unsigned i = 0; i < indent; ++i:
		kdebug ('\t')
	kdebug ("pages: ")
	print_obj (pages)
	for unsigned i = 0; i < indent; ++i:
		kdebug ('\t')
	kdebug ("threads: ")
	print_obj (threads)
	for unsigned i = 0; i < indent; ++i:
		kdebug ('\t')
	kdebug ("receivers: ")
	for kReceiver *r = receivers; r; r = (kReceiver *)r->next:
		kdebug_num ((unsigned)r)
		kdebug ("(")
		// Message queue of this receiver, printed inside parentheses.
		for kMessage *m = r->messages; m; m = (kMessage *)m->next:
			kdebug_num ((unsigned)m)
			kdebug ("->")
		kdebug ("NULL)->")
	kdebug ("NULL\n")
	for unsigned i = 0; i < indent; ++i:
		kdebug ('\t')
	kdebug ("capses: ")
	print_obj (capses)
	for kMemory *m = memories; m; m = (kMemory *)m->next:
		m->print (line, indent)
|
|
|
|
void check_impl (kObject *o, unsigned num, char const *msg):
	// Verify every object in list o against check_free; panic with num
	// and msg on the first failure.
	for ; o; o = (kObject *)o->next:
		unsigned n = (unsigned)NEXT (o)
		// Size of o's block: distance to the next object in the page, or
		// the remainder of the page for the last object.
		// NOTE(review): search_free computes the in-page offset as
		// (addr & ~PAGE_MASK); here (addr & PAGE_MASK) is used for the
		// same quantity — one of the two polarities is likely wrong.
		// Verify against the PAGE_MASK definition in kernel.hh.
		unsigned size = n ? n - (unsigned)o : PAGE_SIZE - ((unsigned)o & PAGE_MASK)
		if !check_free (o, size):
			panic (num, msg)
|