#pypp 0
// Iris: micro-kernel for a capability-based operating system.
// alloc.ccp: Allocation of kernel structures.
// Copyright 2009 Bas Wijnen
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

#include "kernel.hh"

// Memory model used for kernel structure storage
// Each Memory object has several pointers, one for each type of object it contains.  These pointers are the start of doubly linked lists.
// Each object also has a NEXT and PREV pointer, which point to the next and previous object in the same page.  These pointers are 0 for the first (or last) object in a page.  There is no pointer to the first object; it always starts at page_base + SIZE.
// The PREV/NEXT-list contains all objects in the page, including kFree objects, in the order they appear in the page.
// The prev/next-lists contain only objects of one type, unsorted.
// All pointers are to the start of the object.  There is a header of size SIZE before it, containing NEXT and PREV.
#define PREV(x) (((kObject **)(x))[-2])
#define NEXT(x) (((kObject **)(x))[-1])
#define SIZE (2 * sizeof (kObject *))

bool kMemory::use (unsigned num):
	// Go up to parents, incrementing used.
	for kMemory *m = this; m; m = m->address_space:
		if m->used + num > m->limit:
			// Not allowed.  Restore used for all children.
			for kMemory *r = this; r != m; r = r->address_space:
				r->used -= num
			return false
		m->used += num
	return true

void kMemory::unuse (unsigned num):
	for kMemory *m = this; m; m = m->address_space:
		m->used -= num

// This allocates a new block of memory for use by the kernel.
// size is the required size of the block (excluding SIZE).
// first is a pointer to the first object pointer of this type.
// The result is a block of size at least size, which is linked as an object in the list of first.
void *kMemory::search_free (unsigned size, void **first):
	kFree *f
	unsigned s = 0
	// Let's see if there already is a kFree chunk which is large enough.
	for f = frees; f; f = (kFree *)f->next:
		if NEXT (f):
			s = (unsigned)NEXT (f) - (unsigned)f
		else:
			s = PAGE_SIZE - ((unsigned)f & ~PAGE_MASK) + SIZE
		// s is now the size of the current free block, including SIZE.
		// The requirement is to fit a block of size, plus its SIZE header.
		if s >= size + SIZE:
			break
	if !f:
		// No chunk was found; allocate a new page and add a chunk in it.  It is always large enough.
		unsigned p = palloc ()
		if !p:
			dbg_log ("no free space: kernel allocation failed")
			return NULL
		f = (kFree *)(p + SIZE)
		// Mark it as a kFree object.
		f->marker = ~0
		// Link it in the kFree list.
		f->next = frees
		f->prev = NULL
		frees = f
		if f->next:
			((kFree *)f->next)->prev = f
		// There are no other objects in this page.
		NEXT (f) = NULL
		PREV (f) = NULL
		// The size of this block is the entire page.
		s = PAGE_SIZE
	// We have a free block, possibly too large.  The block is linked in frees, and in the page.
	if s >= size + sizeof (kFree) + 2 * SIZE:
		// Create the new object at the end and keep the kFree.
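		// To make the arithmetic below concrete (illustrative numbers only, assuming 32-bit pointers so SIZE == 8 and PAGE_SIZE == 4096):
		// a freshly allocated page gives f == page + 8 and s == 4096; a request for size == 24 then places the new object at
		// page + 4072 with its header at page + 4064, and the remaining kFree keeps page + 8 up to page + 4064.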
		// f is the start of the free block.
		// f + (s - SIZE) is the end of the free block, compensated for the header of the next block.
		// f + (s - SIZE) - size is the address where the new block should start.
		kFree *obj = (kFree *)((unsigned)f + (s - SIZE) - size)
		// Link the new object in the page.
		NEXT (obj) = NEXT (f)
		if NEXT (obj):
			PREV (NEXT (obj)) = obj
		PREV (obj) = f
		NEXT (f) = obj
		// Set f to the new object, because it is used by that name later.
		f = obj
	else:
		// The block was only just large enough: turn it into a new type.  It is already linked into the page.
		// Unlink it from the free list.
		if f->prev:
			((kFree *)f->prev)->next = f->next
		else:
			frees = (kFree *)f->next
		if f->next:
			((kFree *)f->next)->prev = f->prev
	// f is now a block which is linked in the page, but not in any list.  Link it into first.
	f->next = (kFree *)*first
	f->prev = NULL
	if f->next:
		((kFree *)f->next)->prev = f
	*first = f
	// Set common initial values.
	f->address_space = this
	f->refs.reset ()
	return f

// Free an object; it is still in its list, and it is still in the page list.
void kMemory::free_obj (kObject *obj, kPointer *first):
	kFree *self = (kFree *)obj
	// Invalidate references.
	while self->refs.valid ():
		self->refs->invalidate ()
	// Free it from its list.
	if self->prev:
		((kFree *)self->prev)->next = self->next
	else:
		*(kPointer *)first = (kPointer)self->next
	if self->next:
		((kFree *)self->next)->prev = self->prev
	// Merge with previous, if it exists and is a kFree.
	if PREV (self) && PREV (self)->is_free ():
		self = (kFree *)PREV (self)
		// Remove the object from the page list.
		NEXT (self) = NEXT (obj)
		if NEXT (self):
			PREV (NEXT (self)) = self
	else:
		// The previous object is not a kFree, so create a new one.
		// It is already linked in the page, but needs to be linked into the free list.
		self->next = frees
		self->prev = NULL
		if self->next:
			((kFree *)self->next)->prev = self
		frees = self
		// Mark it as a kFree.
		self->marker = ~0
	// Merge with next, if it exists and is a kFree.
	if NEXT (self) && NEXT (self)->is_free ():
		// Unlink the next from the frees list.
		kFree *n = (kFree *)NEXT (self)
		if n->prev:
			((kFree *)n->prev)->next = n->next
		else:
			frees = (kFree *)n->next
		if n->next:
			((kFree *)n->next)->prev = n->prev
		// Unlink the next from the page list.
		NEXT (self) = NEXT (NEXT (self))
		if NEXT (self):
			PREV (NEXT (self)) = self
	// Free page if the resulting object is the only thing in it.
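	// (PREV and NEXT can only both be NULL when the merged kFree is the sole object in its page:
	// the first object always starts at page_base + SIZE, so self - SIZE below is the page base handed to pfree.)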
	if !PREV (self) && !NEXT (self):
		if self->next:
			((kFree *)self->next)->prev = self->prev
		if self->prev:
			((kFree *)self->prev)->next = self->next
		else:
			frees = (kFree *)self->next
		pfree ((unsigned)self - SIZE)

kPage *kMemory::alloc_page ():
	kPage *ret = (kPage *)search_free (sizeof (kPage), (void **)&pages)
	if !ret:
		return NULL
	ret->frame = 0
	ret->flags = 0
	return ret

kThread *kMemory::alloc_thread (unsigned size):
	kThread *ret = (kThread *)search_free (sizeof (kThread) + (size - 1) * sizeof (kThread::caps_store), (void **)&threads)
	if !ret:
		return NULL
	ret->receivers = NULL
	ret->pc = 0
	ret->sp = 0
	kThread_arch_init (ret)
	ret->flags = 0
	ret->id = ~0
	ret->schedule_prev = NULL
	ret->schedule_next = NULL
	ret->slots = size
	for unsigned i = 0; i < size; ++i:
		ret->slot[i].prev.thread = NULL
		ret->slot[i].next.thread = NULL
		ret->slot[i].caps = NULL
	return ret

void kCaps::init (unsigned s):
	first_slot.thread = NULL
	size = s
	for unsigned i = 0; i < s; ++i:
		set (i, NULL, 0, kCapRef (), NULL)

kCaps *kMemory::alloc_caps (unsigned size):
	kCaps *ret = (kCaps *)search_free (sizeof (kCaps) + (size - 1) * sizeof (kCapability), (void **)&capses)
	if !ret:
		return NULL
	ret->init (size)
	return ret

kMessage *kMemory::alloc_message (kReceiver *target):
	kMessage *ret = (kMessage *)search_free (sizeof (kMessage) + sizeof (kCapability), (void **)&target->messages)
	if !ret:
		return NULL
	ret->caps.init (2)
	if !ret->next:
		target->last_message = ret
	return ret

kList *kMemory::alloc_list ():
	kList *ret = (kList *)search_free (sizeof (kList), (void **)&lists)
	if !ret:
		return NULL
	ret->owner.init (1)
	ret->first_listitem = NULL
	return ret

kListitem *kMemory::alloc_listitem ():
	kListitem *ret = (kListitem *)search_free (sizeof (kListitem), (void **)&listitems)
	if !ret:
		return NULL
	ret->target.init (1)
	ret->list = NULL
	ret->prev_item = NULL
	ret->next_item = NULL
	ret->info = 0
	return ret

kReceiver *kMemory::alloc_receiver ():
	kReceiver *ret = (kReceiver *)search_free (sizeof (kReceiver), (void **)&receivers)
	if !ret:
		return NULL
	ret->owner = NULL
	ret->prev_owned = NULL
	ret->next_owned = NULL
	ret->alarm_count = ~0
	ret->caps = NULL
	ret->capabilities.reset ()
	ret->messages = NULL
	ret->last_message = NULL
	ret->reply_protected_data = ~0
	ret->protected_only = false
	ret->queue_limit = ~0
	return ret

kMemory *kMemory::alloc_memory ():
	kMemory *ret = (kMemory *)search_free (sizeof (kMemory), (void **)&memories)
	if !ret:
		return NULL
	ret->frees = NULL
	ret->pages = NULL
	ret->threads = NULL
	ret->capses = NULL
	ret->receivers = NULL
	ret->memories = NULL
	ret->lists = NULL
	ret->listitems = NULL
	ret->limit = ~0
	ret->used = 0
	kMemory_arch_init (ret)
	return ret

void kCaps::set (unsigned index, kReceiver *target, Kernel::Num pdata, kCapRef parent, kCapRef *parent_ptr):
	caps[index].target = target
	caps[index].protected_data = pdata
	caps[index].parent = parent
	caps[index].children.reset ()
	caps[index].sibling_prev.reset ()
	if parent.valid ():
		caps[index].sibling_next = parent->children
		parent->children = kCapRef (this, index)
	else:
		if parent_ptr:
			caps[index].sibling_next = *parent_ptr
			*parent_ptr = kCapRef (this, index)
		else:
			caps[index].sibling_next.reset ()
	if caps[index].sibling_next.valid ():
		caps[index].sibling_next->sibling_prev = kCapRef (this, index)
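// Illustrative use of set (a hypothetical call, not made from this file): a root capability for a
// receiver would be created as set (i, recv, pdata, kCapRef (), &recv->capabilities), which links
// slot i at the head of the receiver's capability list; a derived capability passes the parent
// reference instead, so the new slot is linked into parent->children.  clone below chooses between
// these cases.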
void kCaps::clone (unsigned index, kCapRef source, bool copy):
	cap (index)->invalidate ()
	if !source.valid ():
		return
	if copy:
		if source->parent.valid ():
			set (index, source->target, source->protected_data, source->parent)
		else if (unsigned)source->target & ~KERNEL_MASK:
			set (index, source->target, source->protected_data, kCapRef (), &source->target->capabilities)
		else:
			set (index, source->target, source->protected_data, kCapRef (), &((kObject *)source->protected_data.l)->refs)
	else:
		set (index, source->target, source->protected_data, source)

void kMemory::free_page (kPage *page):
	if page->flags & Kernel::Page::PAYING:
		unuse ()
	if page->frame:
		pfree (page->frame)
	free_obj (page, (kPointer *)&pages)

void kThread::unset_slot (unsigned s):
	if !slot[s].caps:
		return
	if slot[s].prev.thread:
		slot[s].prev.thread->slot[slot[s].prev.index].next = slot[s].next
	else:
		slot[s].caps->first_slot = slot[s].next
	if slot[s].next.thread:
		slot[s].next.thread->slot[slot[s].next.index].prev = slot[s].prev
	slot[s].prev.thread = NULL
	slot[s].next.thread = NULL
	slot[s].caps = NULL

void kMemory::free_thread (kThread *thread):
	thread->unrun ()
	while thread->receivers:
		thread->receivers->orphan ()
	for unsigned i = 0; i < thread->slots; ++i:
		thread->unset_slot (i)
	free_obj (thread, (void **)&threads)

void kMemory::free_message (kReceiver *owner, kMessage *message):
	for unsigned i = 0; i < 2; ++i:
		message->caps.cap (i)->invalidate ()
	if !message->next:
		owner->last_message = (kMessageP)message->prev
	free_obj (message, (void **)&owner->messages)

void kMemory::free_receiver (kReceiver *receiver):
	receiver->orphan ()
	while receiver->capabilities.valid ():
		receiver->capabilities->invalidate ()
	while receiver->messages:
		free_message (receiver, receiver->messages)
	free_obj (receiver, (void **)&receivers)

void kReceiver::orphan ():
	if prev_owned:
		prev_owned->next_owned = next_owned
	else:
		owner->receivers = next_owned
	if next_owned:
		next_owned->prev_owned = prev_owned
	owner = NULL

void kReceiver::own (kThread *o):
	if owner:
		orphan ()
	owner = o
	next_owned = o->receivers
	if next_owned:
		next_owned->prev_owned = this
	o->receivers = this

void kCapability::invalidate ():
	if !target:
		return
	if sibling_prev.valid ():
		sibling_prev->sibling_next = sibling_next
	else if (unsigned)target & ~KERNEL_MASK:
		target->capabilities = sibling_next
	else:
		((kObject *)protected_data.l)->refs = sibling_next
	if sibling_next.valid ():
		sibling_next->sibling_prev = sibling_prev
	parent.reset ()
	sibling_prev.reset ()
	sibling_next.reset ()
	kCapability *c = this
	while c:
		while c->children.valid ():
			c = c->children.deref ()
		kCapability *next = c->sibling_next.deref ()
		if !next:
			next = c->parent.deref ()
		c->target = NULL
		c->parent.reset ()
		c->children.reset ()
		c->sibling_prev.reset ()
		c->sibling_next.reset ()
		c->protected_data = 0
		c = next

void kMemory::free_caps (kCaps *c):
	for unsigned i = 0; i < c->size; ++i:
		c->caps[i].invalidate ()
	while c->first_slot.thread:
		c->first_slot.thread->unset_slot (c->first_slot.index)
	free_obj (c, (void **)&capses)

void kListitem::add (kList *l):
	// Remove item from list.
	if list:
		if prev_item:
			prev_item->next_item = next_item
		else:
			list->first_listitem = next_item
		if next_item:
			next_item->prev_item = prev_item
		// Notify list owner.
		if list->owner.cap (0):
			kCapability::Context context
			context.data[0] = list->first_listitem != NULL
			context.data[1] = info
			list->owner.cap (0)->invoke (&context)
		// Don't leak info to new owner.
		info = 0
	list = l
	prev_item = NULL
	if !l:
		next_item = NULL
		return
	next_item = l->first_listitem
	l->first_listitem = this
	if next_item:
		next_item->prev_item = this
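// Note on kListitem::add above: when an item leaves a non-empty list, the list owner's capability 0
// is invoked with data[0] set to 1 if items remain (0 if the list became empty) and data[1] set to
// the item's info, which is cleared afterwards so it cannot leak to the next list.  free_listitem
// below relies on this by calling add (NULL).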
void kMemory::free_listitem (kListitem *i):
	// Unset target.
	i->target.cap (0)->invalidate ()
	// Remove item from its list.
	i->add (NULL)
	// Remove item from its address space.
	free_obj (i, (void **)&listitems)

void kMemory::free_list (kList *l):
	// Unset callback.
	l->owner.cap (0)->invalidate ()
	// Clear list.
	while l->first_listitem:
		l->first_listitem->add (NULL)
	// Remove list from address space.
	free_obj (l, (void **)&lists)

void kMemory::free_memory (kMemory *mem):
	while mem->pages:
		free_page (mem->pages)
	while mem->capses:
		free_caps (mem->capses)
	while mem->threads:
		free_thread (mem->threads)
	while mem->memories:
		free_memory (mem->memories)
	while mem->receivers:
		free_receiver (mem->receivers)
	while mem->lists:
		free_list (mem->lists)
	while mem->listitems:
		free_listitem (mem->listitems)
	kMemory_arch_free (mem)
	if mem->frees:
		panic (0, "kernel memory leak: memory still in use")
	free_obj (mem, (void **)&memories)

void kPage::forget ():
	if share_prev || share_next:
		if share_prev:
			share_prev->share_next = share_next
		if share_next:
			share_next->share_prev = share_prev
		share_prev = NULL
		share_next = NULL
	else:
		// If the page has a frame and should be freed, free it.
		if !((flags ^ Kernel::Page::FRAME) & (Kernel::Page::PHYSICAL | Kernel::Page::FRAME)):
			raw_pfree (frame)
	frame = 0
	flags &= ~(Kernel::Page::FRAME | Kernel::Page::SHARED | Kernel::Page::PHYSICAL | Kernel::Page::UNCACHED)
	kPage_arch_update_mapping (this)

static void check_receiver (kReceiver *r, kCapRef cap, unsigned line):
	if (unsigned)cap->target & ~KERNEL_MASK:
		if cap->target != r:
			dpanic (line, "consistency bug in capabilities")
	else:
		if cap->protected_data.l != (unsigned)r:
			dbg_log ("Buggy: ")
			dbg_log_num ((unsigned)r)
			dbg_log (" ")
			dbg_log_num ((unsigned)cap.caps)
			dbg_log (" ")
			dbg_log_num ((unsigned)cap.caps->address_space)
			dbg_log (" ")
			dbg_log_num ((unsigned)cap.deref ())
			dbg_log (" ")
			dbg_log_num ((unsigned)cap->target)
			dbg_log ("/")
			dbg_log_num (cap->protected_data.l)
			dbg_log ("\n")
			dpanic (line, "consistency bug in kernel capabilities")
	for kCapRef c = cap->children; c.valid (); c = c->sibling_next:
		check_receiver (r, c, line)

void kReceiver::check (unsigned line):
	for kCapRef cap = capabilities; cap.valid (); cap = cap->sibling_next:
		check_receiver (this, cap, line)

void kMemory::check (unsigned line):
	for kReceiver *r = receivers; r; r = (kReceiver *)r->next:
		r->check (line)
	for kMemory *m = memories; m; m = (kMemory *)m->next:
		m->check (line)

static void print_obj (kObject *o):
	for kObject *obj = o; o; o = (kObject *)o->next:
		dbg_log_num ((unsigned)o)
		dbg_log ("->")
	dbg_log ("NULL\n")

void kMemory::print (unsigned line, unsigned indent):
	if indent == 0:
		print_free ()
	for unsigned i = 0; i < indent; ++i:
		dbg_log_char ('\t')
	++indent
	dbg_log ("Memory ")
	dbg_log_num ((unsigned)this)
	dbg_log ("\n")
	for unsigned i = 0; i < indent; ++i:
		dbg_log_char ('\t')
	dbg_log ("frees: ")
	for kFree *f = frees; f; f = (kFree *)f->next:
		dbg_log_num ((unsigned)f)
		dbg_log (":")
		unsigned n = (unsigned)NEXT (f)
		if n:
			n -= (unsigned)f
			if n >= PAGE_SIZE:
				dpanic (0, "invalid kFree")
		dbg_log_num (n, 3)
		dbg_log ("->")
	dbg_log ("NULL\n")
	for unsigned i = 0; i < indent; ++i:
		dbg_log_char ('\t')
	dbg_log ("pages: ")
	print_obj (pages)
	for unsigned i = 0; i < indent; ++i:
		dbg_log_char ('\t')
	dbg_log ("threads: ")
	print_obj (threads)
	for unsigned i = 0; i < indent; ++i:
		dbg_log_char ('\t')
	dbg_log ("receivers: ")
	for kReceiver *r = receivers; r; r = (kReceiver *)r->next:
		dbg_log_num ((unsigned)r)
		dbg_log ("(")
		for kMessage *m = r->messages; m; m = (kMessage *)m->next:
			dbg_log_num ((unsigned)m)
			dbg_log ("->")
		dbg_log ("NULL)->")
	dbg_log ("NULL\n")
	for unsigned i = 0; i < indent; ++i:
		dbg_log_char ('\t')
	dbg_log ("capses: ")
	print_obj (capses)
	for kMemory *m = memories; m; m = (kMemory *)m->next:
		m->print (line, indent)
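// check_impl below is a consistency pass over one object list: for each object it recomputes the
// block size from NEXT (or from the end of the page for the last object) and panics with the given
// message when check_free rejects it.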
void check_impl (kObject *o, unsigned num, char const *msg):
	for ; o; o = (kObject *)o->next:
		unsigned n = (unsigned)NEXT (o)
		unsigned size = n ? n - (unsigned)o : PAGE_SIZE - ((unsigned)o & PAGE_MASK)
		if !check_free (o, size):
			panic (num, msg)