#pypp 0
// Iris: micro-kernel for a capability-based operating system.
// alloc.ccp: Allocation of kernel structures.
// Copyright 2009 Bas Wijnen
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

#include "kernel.hh"

// Every kernel object is preceded in its page by two pointers, linking it to
// the previous and next object in that page.
#define PREV(x) (((Object_base **)(x))[-2])
#define NEXT(x) (((Object_base **)(x))[-1])
#define SIZE (2 * sizeof (Object_base *))

bool Memory::use ():
	// Go up to parents, incrementing used.
	for Memory *m = this; m; m = m->address_space:
		if m->used >= m->limit:
			// Not allowed.  Restore used for all children.
			for Memory *r = this; r != m; r = r->address_space:
				--r->used
			return false
		++m->used
	return true

void Memory::unuse ():
	for Memory *m = this; m; m = m->address_space:
		--m->used

unsigned raw_zalloc ():
	FreePage *ret = zero_pages
	if !ret:
		ret = junk_pages
		junk_pages = ret->next
		for unsigned i = 0; i < (PAGE_SIZE >> 2); ++i:
			((unsigned *)ret)[i] = 0
	else:
		zero_pages = ret->next
	ret->next = NULL
	return (unsigned)ret

void raw_pfree (unsigned page):
	if !page:
		return
	FreePage *p = (FreePage *)page
	p->next = junk_pages
	junk_pages = p

unsigned Memory::palloc ():
	if !use ():
		return NULL
	FreePage *ret = junk_pages
	if !ret:
		ret = zero_pages
		zero_pages = ret->next
	else:
		junk_pages = ret->next
	return (unsigned)ret

unsigned Memory::zalloc ():
	if !use ():
		return NULL
	return raw_zalloc ()

void Memory::pfree (unsigned page):
	unuse ()
	return raw_pfree (page)

void Memory::zfree (unsigned page):
	unuse ()
	FreePage *p = (FreePage *)page
	p->next = zero_pages
	zero_pages = p

void *Memory::search_free (unsigned size, void **first):
	Free *f
	unsigned s = 0
	for f = frees; f; f = f->next:
		if NEXT (f):
			s = (unsigned)NEXT (f) - (unsigned)f
		else:
			s = PAGE_SIZE - ((unsigned)f & ~PAGE_MASK) + SIZE
		if s >= size + SIZE:
			break
	if !f:
		// No free block is large enough; allocate a new page.
		unsigned p = palloc ()
		if !p:
			return NULL
		f = (Free *)(p + SIZE)
		f->marker = ~0
		f->next = frees
		f->prev = NULL
		frees = f
		if f->next:
			f->next->prev = f
		NEXT (f) = NULL
		PREV (f) = NULL
		s = PAGE_SIZE
	// We have a free block, possibly too large.
	if s >= size + sizeof (Free) + 2 * SIZE:
		// Create the new object at the end and keep the Free.
		Free *obj = (Free *)((unsigned)f + s - size - SIZE)
		NEXT (obj) = NEXT (f)
		if NEXT (obj):
			PREV (NEXT (obj)) = obj
		PREV (obj) = f
		NEXT (f) = obj
		f = obj
	else:
		// Use the entire block; unlink it from the free list.
		if f->prev:
			f->prev->next = f->next
		else:
			frees = f->next
		if f->next:
			f->next->prev = f->prev
	f->address_space = this
	f->refs = NULL
	f->next = (Free *)*first
	f->prev = NULL
	if f->next:
		f->next->prev = f
	*first = f
	return f

void Memory::free_obj (Object_base *obj):
	Free *self
	// Merge with previous, if it exists and is a Free.
	if PREV (obj) && PREV (obj)->is_free ():
		self = (Free *)PREV (obj)
		NEXT (self) = NEXT (obj)
		if NEXT (obj):
			PREV (NEXT (obj)) = self
	else:
		self = (Free *)obj
		self->next = frees
		self->prev = NULL
		if self->next:
			self->next->prev = self
		frees = self
		self->marker = ~0
	// Merge with next, if it exists and is a Free.
	if NEXT (self) && NEXT (self)->is_free ():
		NEXT (self) = NEXT (NEXT (self))
		if NEXT (self):
			PREV (NEXT (self)) = self
	// Free page if the resulting object is the only thing in it.
	if !PREV (self) && !NEXT (self):
		if self->next:
			self->next->prev = self->prev
		if self->prev:
			self->prev->next = self->next
		else:
			frees = self->next
		pfree ((unsigned)self - SIZE)

Page *Memory::alloc_page ():
	Page *ret = (Page *)search_free (sizeof (Page), (void **)&pages)
	if !ret:
		return NULL
	ret->data.frame = 0
	ret->data.flags = 0
	return ret

Thread *Memory::alloc_thread ():
	Thread *ret = (Thread *)search_free (sizeof (Thread), (void **)&threads)
	if !ret:
		return NULL
	ret->address_space = this
	ret->pc = 0
	ret->sp = 0
	Thread_arch_init (ret)
	ret->flags = 0
	ret->schedule_prev = NULL
	ret->schedule_next = NULL
	ret->receivers = NULL
	return ret

Message *Memory::alloc_message (Receiver *target, unsigned protected_data):
	Message *ret = (Message *)search_free (sizeof (Message), (void **)&target->messages)
	if !ret:
		return NULL
	for unsigned i = 0; i < 4; ++i:
		ret->capabilities[i] = NULL
		ret->data[i] = 0
	ret->protected_data = protected_data
	return ret

Receiver *Memory::alloc_receiver ():
	Receiver *ret = (Receiver *)search_free (sizeof (Receiver), (void **)&receivers)
	if !ret:
		return NULL
	ret->owner = NULL
	ret->prev_owned = NULL
	ret->next_owned = NULL
	ret->capabilities = NULL
	ret->messages = NULL
	ret->reply_protected_data = ~0
	ret->protected_only = false
	return ret

Capability *Memory::alloc_capability (Receiver *target, Capability *parent, Capability **parent_ptr, unsigned protected_data, Capability *ret):
	if !ret:
		ret = (Capability *)search_free (sizeof (Capability), (void **)&capabilities)
		if !ret:
			return NULL
	ret->target = target
	ret->protected_data = protected_data
	ret->parent = parent
	ret->children = NULL
	ret->sibling_prev = NULL
	if parent:
		ret->sibling_next = parent->children
		parent->children = ret
	else:
		if parent_ptr:
			ret->sibling_next = *parent_ptr
		else:
			ret->sibling_next = NULL
	if ret->sibling_next:
		ret->sibling_next->sibling_prev = ret
	return ret

Capability *Memory::clone_capability (Capability *source, bool copy, Capability *ret):
	if copy:
		if source->parent:
			return alloc_capability (source->target, source->parent, &source->parent->children, source->protected_data, ret)
		else if (unsigned)source->target & ~KERNEL_MASK:
			return alloc_capability (source->target, source->parent, &source->target->capabilities, source->protected_data, ret)
		else:
			return alloc_capability (source->target, source->parent, &((Object_base *)source->protected_data)->refs, source->protected_data, ret)
	else:
		return alloc_capability (source->target, source, &source->children, source->protected_data, ret)

Cappage *Memory::alloc_cappage ():
	Cappage *ret = (Cappage *)search_free (sizeof (Cappage), (void **)&cappages)
	if !ret:
		return NULL
	ret->data.frame = zalloc ()
	if !ret->data.frame:
		free_cappage (ret)
		return NULL
	ret->data.flags = 0
	return ret

Memory *Memory::alloc_memory ():
	Memory *ret = (Memory *)search_free (sizeof (Memory), (void **)&memories)
	if !ret:
		return NULL
	ret->frees = NULL
	ret->pages = NULL
	ret->threads = NULL
	ret->memories = NULL
	ret->limit = ~0
	ret->used = 0
	Memory_arch_init (ret)
	return ret

void Memory::free_page (Page *page):
	if page->prev:
		page->prev->next = page->next
	else:
		pages = page->next
	if page->next:
		page->next->prev = page->prev
	unuse ()
	if page->data.frame:
		pfree (page->data.frame)
	free_obj (page)

void Memory::free_thread (Thread *thread):
	if thread->prev:
		thread->prev->next = thread->next
	else:
		threads = thread->next
	if thread->next:
		thread->next->prev = thread->prev
	// Unschedule.
	if thread->schedule_prev:
		thread->schedule_prev->schedule_next = thread->schedule_next
	else if first_scheduled == thread:
		first_scheduled = thread->schedule_next
	if thread->schedule_next:
		thread->schedule_next->schedule_prev = thread->schedule_prev
	while thread->receivers:
		thread->receivers->orphan ()
	free_obj (thread)

void Memory::free_message (Message *message):
	for unsigned i = 0; i < 4; ++i:
		free_capability (message->capabilities[i])
	free_obj (message)

void Memory::free_receiver (Receiver *receiver):
	receiver->orphan ()
	while receiver->capabilities:
		receiver->capabilities->invalidate ()
	while receiver->messages:
		free_message (receiver->messages)
	free_obj (receiver)

void Receiver::orphan ():
	if prev_owned:
		prev_owned->next_owned = next_owned
	else:
		owner->receivers = next_owned
	if next_owned:
		next_owned->prev_owned = prev_owned
	owner = NULL

void Receiver::own (Thread *o):
	if owner:
		orphan ()
	owner = o
	next_owned = o->receivers
	if next_owned:
		next_owned->prev_owned = this
	o->receivers = this

void Memory::free_capability (Capability *capability):
	capability->invalidate ()
	free_obj (capability)

void Capability::invalidate ():
	if !target:
		return
	if sibling_prev:
		sibling_prev->sibling_next = sibling_next
	else if (unsigned)target & ~KERNEL_MASK:
		target->capabilities = sibling_next
	else:
		((Object_base *)protected_data)->refs = sibling_next
	if sibling_next:
		sibling_next->sibling_prev = sibling_prev
	parent = NULL
	sibling_prev = NULL
	sibling_next = NULL
	Capability *c = this
	while c->children:
		c = c->children
	while c:
		Capability *next = c->sibling_next
		if !next:
			next = c->parent
		c->target = NULL
		c->parent = NULL
		c->children = NULL
		c->sibling_prev = NULL
		c->sibling_next = NULL
		c->protected_data = 0
		c = next

void Memory::free_cappage (Cappage *p):
	for unsigned i = 0; i < CAPPAGE_SIZE; ++i:
		((Capability *)p->data.frame)[i].invalidate ()
	zfree (p->data.frame)
	free_obj (p)

void Memory::free_memory (Memory *mem):
	if mem->prev:
		mem->prev->next = mem->next
	else:
		memories = mem->next
	if mem->next:
		mem->next->prev = mem->prev
	while mem->pages:
		free_page (mem->pages)
	while mem->threads:
		free_thread (mem->threads)
	while mem->memories:
		free_memory (mem->memories)
	Memory_arch_free (mem)
	free_obj (mem)

void Page::forget ():
	if data.share_prev || data.share_next:
		if data.share_prev:
			((Page *)data.share_prev)->data.share_next = data.share_next
		if data.share_next:
			((Page *)data.share_next)->data.share_prev = data.share_prev
		data.share_prev = NULL
		data.share_next = NULL
	else:
		if ~data.flags & PAGE_FLAG_PHYSICAL:
			raw_pfree (data.frame)
	data.frame = 0
	data.flags &= ~(PAGE_FLAG_FRAME | PAGE_FLAG_SHARED | PAGE_FLAG_PHYSICAL | PAGE_FLAG_UNCACHED)
	Page_arch_update_mapping (this)

void Cappage::forget ():
	if data.share_prev || data.share_next:
		if data.share_prev:
			((Cappage *)data.share_prev)->data.share_next = data.share_next
		if data.share_next:
			((Cappage *)data.share_next)->data.share_prev = data.share_prev
		data.share_prev = NULL
		data.share_next = NULL
	else:
		for unsigned i = 0; i < CAPPAGE_SIZE; ++i:
			((Capability *)data.frame)[i].invalidate ()
		raw_pfree (data.frame)
	data.frame = 0
	data.flags &= ~(PAGE_FLAG_FRAME | PAGE_FLAG_SHARED)