Mirror of git://projects.qi-hardware.com/iris.git

Merge branch 'master' of ssh://spark/home/shevek/trendtac/kernel

Bas Wijnen, 2009-05-25 21:53:32 +02:00
commit a65e959a8a
14 changed files with 892 additions and 232 deletions

View File

@ -14,9 +14,9 @@ boot_sources = init.cc
BUILT_SOURCES = $(kernel_sources) $(boot_sources) BUILT_SOURCES = $(kernel_sources) $(boot_sources)
PYPP = /usr/bin/pypp PYPP = /usr/bin/pypp
%.cc: %.ccp kernel.hh %.cc: %.ccp
$(PYPP) --name $< < $< > $@ $(PYPP) --name $< < $< > $@
%.hh: %.hhp %.hh: %.hhp boot-programs/sos.h
$(PYPP) --name $< < $< > $@ $(PYPP) --name $< < $< > $@
# Transform ':' into ';' so vim doesn't think there are errors. # Transform ':' into ';' so vim doesn't think there are errors.
@ -28,12 +28,12 @@ arch.hh: mips.hh
arch.cc: mips.cc arch.cc: mips.cc
ln -s $< $@ || true ln -s $< $@ || true
%.o:%.cc Makefile kernel.hh arch.hh %.o:%.cc Makefile kernel.hh arch.hh boot-programs/sos.h
$(CC) $(CPPFLAGS) $(CXXFLAGS) -c $< -o $@ $(CC) $(CPPFLAGS) $(CXXFLAGS) -c $< -o $@
entry.o: thread0 thread1 entry.o: thread0 thread1
%.o:%.S Makefile %.o:%.S Makefile arch.hh
$(CC) $(CPPFLAGS) -DKERNEL_STACK_SIZE=0x2000 -c $< -o $@ $(CC) $(CPPFLAGS) -DKERNEL_STACK_SIZE=0x2000 -c $< -o $@
%: boot-helper.o boot-programs/%.o %: boot-helper.o boot-programs/%.o
@ -52,6 +52,6 @@ junk = mdebug.abi32 reginfo comment pdr
gzip < $< > $@ gzip < $< > $@
clean: clean:
rm -f all uimage *.o all.raw.gz arch.hh arch.cc rm -f all uimage *.o boot-programs/*.o all.raw.gz arch.hh arch.cc
.PHONY: clean .PHONY: clean

208
alloc.ccp
View File

@ -1,18 +1,24 @@
#pypp 0 #pypp 0
#include "kernel.hh" #include "kernel.hh"
// TODO: avoid recursion. #define PREV(x) (((Object_base **)(x))[-2])
#define NEXT(x) (((Object_base **)(x))[-1])
#define SIZE (2 * sizeof (unsigned))
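These three macros replace the old per-object prev_obj/next_obj members: every object handed out by the allocator is now preceded in its page by a two-word header holding the previous/next object pointers, and SIZE is the byte size of that header (which is why a freshly palloc'd page places its first object at p + SIZE further down). A hedged sketch of the intended layout, with an invented struct name:

    // illustration only: the two-word header addressed by PREV/NEXT/SIZE
    struct alloc_header
    {
        Object_base *prev;      // PREV (obj) == ((Object_base **)obj)[-2]
        Object_base *next;      // NEXT (obj) == ((Object_base **)obj)[-1]
    };                          // sizeof (alloc_header) == SIZE on a 32-bit target
    // A page thus looks like [header][object][header][object]..., and any object
    // pointer can be stepped back by SIZE bytes to reach its own header.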
bool Memory::use (): bool Memory::use ():
if used >= limit: // Go up to parents, incrementing used.
return false for Memory *m = this; m; m = m->address_space:
if !parent || parent->use (): if used >= limit:
++used // Not allowed. Restore used for all children.
return true for Memory *r = this; r != m; r = r->address_space:
return false --r->used
return false
++m->used
return true
void Memory::unuse (): void Memory::unuse ():
--used for Memory *m = this; m; m = m->address_space:
return parent->unuse () --m->used
unsigned Memory::palloc (): unsigned Memory::palloc ():
if !use (): if !use ():
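The rewritten use () walks the whole chain of enclosing address spaces and charges each one, rolling the counts back if any level is over its limit; palloc () above then only takes a page when that accounting succeeds. A standalone sketch of the same walk-up-and-roll-back scheme (plain C, simplified field names, not the kernel's own code):

    // hedged sketch of the hierarchical accounting done by Memory::use/unuse
    struct mem { struct mem *parent; unsigned used, limit; };

    static int mem_use (struct mem *m)
    {
        for (struct mem *p = m; p; p = p->parent)
        {
            if (p->used >= p->limit)
            {
                // over the limit at this level: undo the levels already charged
                for (struct mem *r = m; r != p; r = r->parent)
                    --r->used;
                return 0;
            }
            ++p->used;
        }
        return 1;
    }

    static void mem_unuse (struct mem *m)
    {
        for (struct mem *p = m; p; p = p->parent)
            --p->used;
    }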
@ -53,34 +59,35 @@ void *Memory::search_free (unsigned size, void **first):
Free *f Free *f
unsigned s = 0 unsigned s = 0
for f = frees; f; f = f->next: for f = frees; f; f = f->next:
if f->next_obj: if NEXT (f):
s = (unsigned)f->next_obj - (unsigned)f s = (unsigned)NEXT (f) - (unsigned)f
else: else:
s = PAGE_SIZE - ((unsigned)f & ~PAGE_MASK) s = PAGE_SIZE - ((unsigned)f & ~PAGE_MASK) + SIZE
if s >= size: if s >= size + SIZE:
break break
if !f: if !f:
f = (Free *)palloc () unsigned p = palloc ()
if !f: if !p:
return NULL return NULL
f = (Free *)(p + SIZE)
f->marker = ~0 f->marker = ~0
f->next = frees f->next = frees
f->prev = NULL f->prev = NULL
frees = f frees = f
if f->next: if f->next:
f->next->prev = f f->next->prev = f
f->next_obj = NULL NEXT (f) = NULL
f->prev_obj = NULL PREV (f) = NULL
s = PAGE_SIZE s = PAGE_SIZE
// We have a free block, possibly too large. // We have a free block, possibly too large.
if s >= size + sizeof (Free): if s >= size + sizeof (Free) + SIZE:
// Create the new object at the end and keep the Free. // Create the new object at the end and keep the Free.
Free *obj = (Free *)((unsigned)f + s - size) Free *obj = (Free *)((unsigned)f + s - size - SIZE)
obj->next_obj = f->next_obj NEXT (obj) = NEXT (f)
if obj->next_obj: if NEXT (obj):
obj->next_obj->prev_obj = obj PREV (NEXT (obj)) = obj
obj->prev_obj = f PREV (obj) = f
f->next_obj = obj NEXT (f) = obj
f = obj f = obj
else: else:
if f->prev: if f->prev:
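For reference, a hedged arithmetic walk-through of the split test above, assuming the 4 KiB pages and the two-word SIZE header used in this file (the 24-byte request is made up):

    // illustration: allocating 24 bytes from a freshly palloc'd page
    // f = page + SIZE, s = PAGE_SIZE = 4096, SIZE = 8
    // s >= 24 + sizeof (Free) + SIZE, so the object is carved from the end:
    //   obj = (unsigned)f + s - 24 - SIZE = page + 4096 - 24
    // i.e. the 24-byte body ends exactly at the page boundary, its PREV/NEXT header
    // sits in the 8 bytes below it, and the Free block at the front keeps serving
    // later requests; only a block too small for such a split is handed out whole.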
@ -89,6 +96,8 @@ void *Memory::search_free (unsigned size, void **first):
frees = f->next frees = f->next
if f->next: if f->next:
f->next->prev = f->prev f->next->prev = f->prev
f->address_space = this
f->refs = NULL
f->next = (Free *)*first f->next = (Free *)*first
f->prev = NULL f->prev = NULL
if f->next: if f->next:
@ -96,54 +105,62 @@ void *Memory::search_free (unsigned size, void **first):
*first = f *first = f
return f return f
void Object_base::free_obj (Memory *parent): void Memory::free_obj (Object_base *obj):
Free *self Free *self
// Merge with previous, if it exists and is a Free. // Merge with previous, if it exists and is a Free.
if prev_obj && prev_obj->is_free (): if PREV (obj) && PREV (obj)->is_free ():
self = (Free *)prev_obj self = (Free *)PREV (obj)
self->next_obj = next_obj NEXT (self) = NEXT (obj)
if next_obj: if NEXT (obj):
next_obj->prev_obj = self PREV (NEXT (obj)) = self
else: else:
self = (Free *)this self = (Free *)obj
self->next = parent->frees self->next = frees
self->prev = NULL self->prev = NULL
if self->next: if self->next:
self->next->prev = self self->next->prev = self
parent->frees = self frees = self
self->marker = ~0 self->marker = ~0
// Merge with next, if it exists and is a Free. // Merge with next, if it exists and is a Free.
if self->next_obj && self->next_obj->is_free (): if NEXT (self) && NEXT (self)->is_free ():
self->next_obj = self->next_obj->next_obj NEXT (self) = NEXT (NEXT (self))
if self->next_obj: if NEXT (self):
self->next_obj->prev_obj = self PREV (NEXT (self)) = self
// Free page if the resulting object is the only thing in it. // Free page if the resulting object is the only thing in it.
if !self->prev_obj && !self->next_obj: if !PREV (self) && !NEXT (self):
if self->next: if self->next:
self->next->prev = self->prev self->next->prev = self->prev
if self->prev: if self->prev:
self->prev->next = self->next self->prev->next = self->next
else: else:
parent->frees = self->next frees = self->next
parent->pfree ((unsigned)self) pfree ((unsigned)(self - SIZE))
Page *Memory::alloc_page (): Page *Memory::alloc_page ():
Page *ret = (Page *)search_free (sizeof (Page), (void **)&pages) Page *ret = (Page *)search_free (sizeof (Page), (void **)&pages)
if !ret:
return NULL
ret->physical = 0 ret->physical = 0
return ret return ret
Thread *Memory::alloc_thread (): Thread *Memory::alloc_thread ():
Thread *ret = (Thread *)search_free (sizeof (Thread), (void **)&threads) Thread *ret = (Thread *)search_free (sizeof (Thread), (void **)&threads)
if !ret:
return NULL
ret->address_space = this ret->address_space = this
ret->pc = 0 ret->pc = 0
ret->sp = 0 ret->sp = 0
Thread_arch_init (ret) Thread_arch_init (ret)
ret->flags = 0
ret->schedule_prev = NULL ret->schedule_prev = NULL
ret->schedule_next = NULL ret->schedule_next = NULL
ret->receivers = NULL
return ret return ret
Message *Memory::alloc_message (Capability *source): Message *Memory::alloc_message (Capability *source):
Message *ret = (Message *)search_free (sizeof (Message), (void **)&source->target->messages) Message *ret = (Message *)search_free (sizeof (Message), (void **)&source->target->messages)
if !ret:
return NULL
for unsigned i = 0; i < 4; ++i: for unsigned i = 0; i < 4; ++i:
ret->capabilities[i] = NULL ret->capabilities[i] = NULL
ret->data[i] = 0 ret->data[i] = 0
@ -152,6 +169,8 @@ Message *Memory::alloc_message (Capability *source):
Receiver *Memory::alloc_receiver (): Receiver *Memory::alloc_receiver ():
Receiver *ret = (Receiver *)search_free (sizeof (Receiver), (void **)&receivers) Receiver *ret = (Receiver *)search_free (sizeof (Receiver), (void **)&receivers)
if !ret:
return NULL
ret->owner = NULL ret->owner = NULL
ret->prev_owned = NULL ret->prev_owned = NULL
ret->next_owned = NULL ret->next_owned = NULL
@ -159,20 +178,41 @@ Receiver *Memory::alloc_receiver ():
ret->messages = NULL ret->messages = NULL
return ret return ret
Capability *Memory::alloc_capability (Receiver *target, Capability **parent, unsigned protected_data): Capability *Memory::alloc_capability (Receiver *target, Capability *parent, Capability **parent_ptr, unsigned protected_data, Capability *ret):
Capability *ret = (Capability *)search_free (sizeof (Capability), (void **)&capabilities) if !ret:
ret = (Capability *)search_free (sizeof (Capability), (void **)&capabilities)
if !ret:
return NULL
ret->target = target ret->target = target
ret->parent = parent
ret->children = NULL ret->children = NULL
ret->sibling_prev = NULL ret->sibling_prev = NULL
ret->sibling_next = parent ? *parent : NULL ret->sibling_next = parent_ptr ? *parent_ptr : NULL
if ret->sibling_next: if ret->sibling_next:
ret->sibling_next->sibling_prev = ret ret->sibling_next->sibling_prev = ret
ret->protected_data = protected_data ret->protected_data = protected_data
return ret return ret
Capability *Memory::clone_capability (Capability *source, bool copy, Capability *ret):
if copy:
return alloc_capability (source->target, source->parent, source->parent ? &source->parent->children : &source->target->capabilities, source->protected_data, ret)
else:
return alloc_capability (source->target, source, &source->children, source->protected_data, ret)
Cappage *Memory::alloc_cappage ():
Cappage *ret = (Cappage *)search_free (sizeof (Cappage), (void **)&cappages)
if !ret:
return NULL
ret->page = (Capability *)zalloc ()
if !ret->page:
free_cappage (ret)
return NULL
return ret
Memory *Memory::alloc_memory (): Memory *Memory::alloc_memory ():
Memory *ret = (Memory *)search_free (sizeof (Memory), (void **)&memories) Memory *ret = (Memory *)search_free (sizeof (Memory), (void **)&memories)
ret->parent = this if !ret:
return NULL
ret->frees = NULL ret->frees = NULL
ret->pages = NULL ret->pages = NULL
ret->threads = NULL ret->threads = NULL
@ -190,8 +230,9 @@ void Memory::free_page (Page *page):
if page->next: if page->next:
page->next->prev = page->prev page->next->prev = page->prev
unuse () unuse ()
pfree (page->physical) if page->physical:
page->free_obj (this) pfree (page->physical)
free_obj (page)
void Memory::free_thread (Thread *thread): void Memory::free_thread (Thread *thread):
if thread->prev: if thread->prev:
@ -207,76 +248,73 @@ void Memory::free_thread (Thread *thread):
first_scheduled = thread->schedule_next first_scheduled = thread->schedule_next
if thread->schedule_next: if thread->schedule_next:
thread->schedule_next->schedule_prev = thread->schedule_prev thread->schedule_next->schedule_prev = thread->schedule_prev
thread->free_obj (this) while thread->receivers:
thread->receivers->orphan ()
free_obj (thread)
void Memory::free_message (Message *message): void Memory::free_message (Message *message):
for unsigned i = 0; i < 4; ++i: for unsigned i = 0; i < 4; ++i:
free_capability (message->capabilities[i]) free_capability (message->capabilities[i])
message->free_obj (this) free_obj (message)
void Memory::free_receiver (Receiver *receiver): void Memory::free_receiver (Receiver *receiver):
if receiver->prev_owned: receiver->orphan ()
receiver->prev_owned->next_owned = receiver->next_owned
else:
receiver->owner->receivers = receiver->next_owned
if receiver->next_owned:
receiver->next_owned->prev_owned = receiver->prev_owned
while receiver->capabilities: while receiver->capabilities:
receiver->capabilities->invalidate () receiver->capabilities->invalidate ()
while receiver->messages: while receiver->messages:
free_message (receiver->messages) free_message (receiver->messages)
receiver->free_obj (this) free_obj (receiver)
void Receiver::orphan ():
if prev_owned:
prev_owned->next_owned = next_owned
else:
owner->receivers = next_owned
if next_owned:
next_owned->prev_owned = prev_owned
owner = NULL
void Receiver::own (Thread *o):
if owner:
orphan ()
owner = o
next_owned = o->receivers
if next_owned:
next_owned->prev_owned = this
o->receivers = this
void Memory::free_capability (Capability *capability): void Memory::free_capability (Capability *capability):
if capability->sibling_prev: capability->invalidate ()
capability->sibling_prev->sibling_next = capability->sibling_next free_obj (capability)
else:
capability->target->capabilities = capability->sibling_next
if capability->sibling_next:
capability->sibling_next->sibling_prev = capability->sibling_prev
// The sibling_prev link is used here to point to the parent.
// This method is used to avoid recursion.
capability->sibling_prev = NULL
Capability *c = capability
while c->children:
c->children->sibling_prev = c
c = c->children
while c:
Capability *next = c->sibling_next
if !next:
next = c->sibling_prev
if next:
next->sibling_prev = c->sibling_prev
c->free_obj (this)
c = next
void Capability::invalidate (): void Capability::invalidate ():
if sibling_prev: if sibling_prev:
sibling_prev->sibling_next = sibling_next sibling_prev->sibling_next = sibling_next
else: else if target:
target->capabilities = sibling_next target->capabilities = sibling_next
if sibling_next: if sibling_next:
sibling_next->sibling_prev = sibling_prev sibling_next->sibling_prev = sibling_prev
// The sibling_prev link is used here to point to the parent.
// This method is used to avoid recursion.
sibling_prev = NULL
Capability *c = this Capability *c = this
while c->children: while c->children:
c->children->sibling_prev = c
c = c->children c = c->children
while c: while c:
Capability *next = c->sibling_next Capability *next = c->sibling_next
if !next: if !next:
next = c->sibling_prev next = c->parent
if next:
next->sibling_prev = c->sibling_prev
c->target = NULL c->target = NULL
c->parent = NULL
c->children = NULL c->children = NULL
c->sibling_prev = NULL c->sibling_prev = NULL
c->sibling_next = NULL c->sibling_next = NULL
c->protected_data = 0 c->protected_data = 0
c = next c = next
void Memory::free_cappage (Cappage *p):
for unsigned i = 0; i < CAPPAGE_SIZE; ++i:
p->page[i].invalidate ()
zfree ((unsigned)p->page)
free_obj (p)
void Memory::free_memory (Memory *mem): void Memory::free_memory (Memory *mem):
if mem->prev: if mem->prev:
mem->prev->next = mem->next mem->prev->next = mem->next
@ -291,4 +329,4 @@ void Memory::free_memory (Memory *mem):
while mem->memories: while mem->memories:
free_memory (mem->memories) free_memory (mem->memories)
Memory_arch_free (mem) Memory_arch_free (mem)
mem->free_obj (this) free_obj (mem)

View File

@ -26,3 +26,4 @@ __start:
.comm __top_memory, 4 .comm __top_memory, 4
.comm __my_memory, 4 .comm __my_memory, 4
.comm __my_admin, 4 .comm __my_admin, 4
.comm __my_call, 4

View File

@ -7,6 +7,7 @@ extern "C" {
#define KERNEL_MASK 0xfff #define KERNEL_MASK 0xfff
#define CAPTYPE_MASK 0xe00 #define CAPTYPE_MASK 0xe00
#define REQUEST_MASK (KERNEL_MASK & ~CAPTYPE_MASK)
#define CAPTYPE_ADMIN 0x000 #define CAPTYPE_ADMIN 0x000
#define CAPTYPE_RECEIVER 0x200 #define CAPTYPE_RECEIVER 0x200
#define CAPTYPE_MEMORY 0x400 #define CAPTYPE_MEMORY 0x400
@ -26,29 +27,259 @@ extern "C" {
#define CAP_RECEIVER_SET_OWNER 1 #define CAP_RECEIVER_SET_OWNER 1
#define CAP_RECEIVER_CREATE_CAPABILITY 2 #define CAP_RECEIVER_CREATE_CAPABILITY 2
#define CAP_RECEIVER_CREATE_CALL_CAPABILITY 3 #define CAP_RECEIVER_CREATE_CALL_CAPABILITY 3
/* Not an operation; a capability with this bit set is a call capability. */
#define CAP_RECEIVER_CALL 4
#define CAP_MEMORY_CREATE 1 #define CAP_MEMORY_CREATE 1
#define CAP_MEMORY_DESTROY 2 #define CAP_MEMORY_DESTROY 2
#define CAP_MEMORY_LIST 3 #define CAP_MEMORY_LIST 3
#define CAP_MEMORY_MAPPING 4 #define CAP_MEMORY_MAPPING 4
#define CAP_MEMORY_DROP 5 #define CAP_MEMORY_SET_LIMIT 5
#define CAP_MEMORY_GET_LIMIT 6
#define CAP_MEMORY_DROP 7
#define CAP_THREAD_RUN 1 #define CAP_THREAD_RUN 1
#define CAP_THREAD_RUN_CONDITIONAL 2 #define CAP_THREAD_GET_INFO 2 /* Details of this are arch-specific. */
#define CAP_THREAD_SLEEP 3 #define CAP_THREAD_SET_INFO 3 /* Details of this are arch-specific. */
#define CAP_THREAD_GET_INFO 4 /* Details of this are arch-specific. */ /* Flag values for processor state */
#define CAP_THREAD_SET_INFO 5 /* Details of this are arch-specific. */ #define THREAD_FLAG_WAITING 0x80000000
#define THREAD_FLAG_RUNNING 0x40000000
#define CAP_PAGE_MAP 1 #define CAP_PAGE_MAP 1
#define CAP_PAGE_SHARE 2 #define CAP_PAGE_SHARE 2
#define CAP_PAGE_SHARE_COW 3 #define CAP_PAGE_SHARE_COW 3
#define CAP_PAGE_FORGET 4 #define CAP_PAGE_FORGET 4
/* Not an operation; a capability without this bit cannot write to the page. */
#define CAP_PAGE_WRITE 5
#define CAP_CAPABILITY_GET 1 #define CAP_CAPABILITY_GET 1
#define CAP_CAPABILITY_SET_DEATH_NOTIFY 2 #define CAP_CAPABILITY_SET_DEATH_NOTIFY 2
#define CAP_CAPPAGE_SET 1 #define CAPPAGE_SIZE 102
#define CAP_CAPPAGE_GET 2 /* Cappage has page's operations as well. */
#define CAP_CAPPAGE_SET 6
#ifndef __KERNEL
typedef unsigned __Capability;
extern __Capability __my_receiver;
extern __Capability __my_admin;
extern __Capability __my_memory;
extern __Capability __my_call;
__Capability __cap_copy (__Capability src)
{
return src | 2;
}
typedef struct __Message
{
unsigned data[4];
__Capability cap[4];
} __Message;
static int __invoke (__Capability target, __Message *msg)
{
register int ret __asm__ ("v0");
register unsigned v0 __asm__ ("v0") = target;
register unsigned a0 __asm__ ("a0") = msg->cap[0];
register unsigned a1 __asm__ ("a1") = msg->cap[1];
register unsigned a2 __asm__ ("a2") = msg->cap[2];
register unsigned a3 __asm__ ("a3") = msg->cap[3];
register unsigned t0 __asm__ ("t0") = msg->data[0];
register unsigned t1 __asm__ ("t1") = msg->data[1];
register unsigned t2 __asm__ ("t2") = msg->data[2];
register unsigned t3 __asm__ ("t3") = msg->data[3];
__asm__ volatile ("syscall" : "+r" (v0), "=r" (a0), "=r" (a1), "=r" (a2), "=r" (a3), "=r" (t0), "=r" (t1), "=r" (t2), "=r" (t3));
return ret;
}
static int __call (__Capability target, __Message *msg)
{
register int ret __asm__ ("v0");
register unsigned v0 __asm__ ("v0") = target;
register unsigned a0 __asm__ ("a0") = msg->cap[0];
register unsigned a1 __asm__ ("a1") = msg->cap[1];
register unsigned a2 __asm__ ("a2") = msg->cap[2];
register unsigned a3 __asm__ ("a3") = msg->cap[3];
register unsigned t0 __asm__ ("t0") = msg->data[0];
register unsigned t1 __asm__ ("t1") = msg->data[1];
register unsigned t2 __asm__ ("t2") = msg->data[2];
register unsigned t3 __asm__ ("t3") = msg->data[3];
__asm__ volatile ("syscall" : "+r" (v0), "+r" (a0), "+r" (a1), "+r" (a2), "+r" (a3), "+r" (t0), "+r" (t1), "+r" (t2), "+r" (t3));
msg->cap[0] = a0;
msg->cap[1] = a1;
msg->cap[2] = a2;
msg->cap[3] = a3;
msg->data[0] = t0;
msg->data[1] = t1;
msg->data[2] = t2;
msg->data[3] = t3;
return ret;
}
static int __invoke_01 (__Capability t, unsigned d)
{
__Message msg;
int ret;
msg.data[0] = d;
return __invoke (t, &msg);
}
static int __invoke_02 (__Capability t, unsigned d0, unsigned d1)
{
__Message msg;
int ret;
msg.data[0] = d0;
msg.data[1] = d1;
return __invoke (t, &msg);
}
static int __invoke_11 (__Capability t, __Capability c, unsigned d)
{
__Message msg;
int ret;
msg.cap[0] = c;
msg.data[0] = d;
return __invoke (t, &msg);
}
static int __invoke_12 (__Capability t, __Capability c, unsigned d0, unsigned d1)
{
__Message msg;
int ret;
msg.cap[0] = c;
msg.data[0] = d0;
msg.data[1] = d1;
return __invoke (t, &msg);
}
static __Capability __call_c01 (__Capability c, unsigned d)
{
__Message msg;
int ret;
msg.cap[0] = c;
msg.data[0] = d;
ret = __call (__my_call, &msg);
return ret ? msg.cap[0] : 0;
}
static __Capability __call_c02 (__Capability c, unsigned d0, unsigned d1)
{
__Message msg;
int ret;
msg.cap[0] = c;
msg.data[0] = d0;
msg.data[1] = d1;
ret = __call (__my_call, &msg);
return ret ? msg.cap[0] : 0;
}
static __Capability __degrade (__Capability src, unsigned mask)
{
return __call_c02 (src, CAP_DEGRADE, mask);
}
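__degrade above asks the kernel, through the CAP_DEGRADE request defined elsewhere in this header, for a copy of a capability with part of its rights mask cleared (the kernel ANDs the mask into the capability word). A hedged sketch, assuming each low bit of a kernel capability word grants the operation with that bit number, which matches how full-rights capabilities are built with data | REQUEST_MASK in the invoke code later in this commit; page_cap stands for a page capability obtained earlier, e.g. from __memory_create_page ():

    /* sketch: derive a capability for the same page that can no longer write to it */
    __Capability ro_page = __degrade (page_cap, ~(1u << CAP_PAGE_WRITE));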
static void __schedule ()
{
__invoke_01 (__my_admin, CAP_ADMIN_SCHEDULE);
}
static int __receiver_set_owner (__Capability receiver, __Capability owner)
{
return __invoke_11 (receiver, owner, CAP_RECEIVER_SET_OWNER);
}
static __Capability __receiver_create_capability (__Capability receiver, unsigned protected_data)
{
return __call_c02 (receiver, CAP_RECEIVER_CREATE_CAPABILITY, protected_data);
}
static __Capability __receiver_create_call_capability (__Capability receiver, unsigned protected_data)
{
return __call_c02 (receiver, CAP_RECEIVER_CREATE_CALL_CAPABILITY, protected_data);
}
static __Capability __memory_create (__Capability memory, unsigned type)
{
return __call_c02 (memory, CAP_MEMORY_CREATE, type);
}
static __Capability __memory_create_page (__Capability memory)
{
return __memory_create (memory, CAPTYPE_PAGE);
}
static __Capability __memory_create_thread (__Capability memory)
{
return __memory_create (memory, CAPTYPE_THREAD);
}
static __Capability __memory_create_receiver (__Capability memory)
{
return __memory_create (memory, CAPTYPE_RECEIVER);
}
static __Capability __memory_create_memory (__Capability memory)
{
return __memory_create (memory, CAPTYPE_MEMORY);
}
static __Capability __memory_create_cappage (__Capability memory)
{
return __memory_create (memory, CAPTYPE_CAPPAGE);
}
static int __memory_destroy (__Capability memory, __Capability target)
{
return __invoke_11 (memory, target, CAP_MEMORY_DESTROY);
}
/* TODO: #define CAP_MEMORY_LIST 3 */
static __Capability __memory_mapping (__Capability memory, unsigned address)
{
return __call_c02 (memory, CAP_MEMORY_MAPPING, address);
}
static void __drop (__Capability cap)
{
__invoke_11 (__my_memory, cap, CAP_MEMORY_DROP);
}
static int __thread_run (__Capability thread, int run)
{
return __invoke_02 (thread, CAP_THREAD_RUN, run);
}
/* TODO:
#define CAP_THREAD_GET_INFO 4
#define CAP_THREAD_SET_INFO 5
*/
/* TODO: all except map should also work for cappages.
#define CAP_PAGE_MAP 1
#define CAP_PAGE_SHARE 2
#define CAP_PAGE_SHARE_COW 3
#define CAP_PAGE_FORGET 4
*/
static __Capability __capability_get (__Capability cap)
{
return __call_c01 (cap, CAP_CAPABILITY_GET);
}
static int __capability_set_death_notify (__Capability source, __Capability target)
{
return __invoke_11 (source, target, CAP_CAPABILITY_SET_DEATH_NOTIFY);
}
static int __capability_cappage_set (__Capability page, __Capability cap, unsigned index)
{
return __invoke_12 (page, cap, CAP_CAPPAGE_SET, index);
}
#endif
#ifdef __cplusplus #ifdef __cplusplus
} }
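Taken together, the wrappers above give a boot program a small C API over the syscall. A hedged usage sketch (the flow and error handling are illustrative only; the calls themselves are the ones defined above):

    /* sketch: create a receiver plus a worker thread and let the thread serve it */
    static void setup_service (void)
    {
        __Capability recv = __memory_create_receiver (__my_memory);
        __Capability thrd = __memory_create_thread (__my_memory);
        if (!recv || !thrd)
            return;
        /* capability that clients will later invoke; 42 is an arbitrary protected value */
        __Capability service = __receiver_create_capability (recv, 42);
        __receiver_set_owner (recv, thrd);
        /* the thread's entry point and stack would be set through CAP_THREAD_SET_INFO,
           which is still marked TODO above, before actually starting it */
        __thread_run (thrd, 1);
        (void)service;  /* would be published to clients out of band */
    }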

51
entry.S
View File

@ -4,50 +4,9 @@
.globl run_idle .globl run_idle
.set noat .set noat
#define Index 0 #define ARCH
#define Random 1 #define ASM
#define EntryLo0 2 #include "arch.hh"
#define EntryLo1 3
#define BadVAddr 8
#define EntryHi 10
#define Status 12
#define EPC 14
// register save positions in Thread
#define SAVE_PC (5 * 4)
#define SAVE_SP (SAVE_PC + 4)
#define SAVE_AT (SAVE_SP + 4)
#define SAVE_V0 (SAVE_AT + 4)
#define SAVE_V1 (SAVE_V0 + 4)
#define SAVE_A0 (SAVE_V1 + 4)
#define SAVE_A1 (SAVE_A0 + 4)
#define SAVE_A2 (SAVE_A1 + 4)
#define SAVE_A3 (SAVE_A2 + 4)
#define SAVE_T0 (SAVE_A3 + 4)
#define SAVE_T1 (SAVE_T0 + 4)
#define SAVE_T2 (SAVE_T1 + 4)
#define SAVE_T3 (SAVE_T2 + 4)
#define SAVE_T4 (SAVE_T3 + 4)
#define SAVE_T5 (SAVE_T4 + 4)
#define SAVE_T6 (SAVE_T5 + 4)
#define SAVE_T7 (SAVE_T6 + 4)
#define SAVE_T8 (SAVE_T7 + 4)
#define SAVE_T9 (SAVE_T8 + 4)
#define SAVE_S0 (SAVE_T9 + 4)
#define SAVE_S1 (SAVE_S0 + 4)
#define SAVE_S2 (SAVE_S1 + 4)
#define SAVE_S3 (SAVE_S2 + 4)
#define SAVE_S4 (SAVE_S3 + 4)
#define SAVE_S5 (SAVE_S4 + 4)
#define SAVE_S6 (SAVE_S5 + 4)
#define SAVE_S7 (SAVE_S6 + 4)
#define SAVE_GP (SAVE_S7 + 4)
#define SAVE_FP (SAVE_GP + 4)
#define SAVE_RA (SAVE_FP + 4)
#define SAVE_HI (SAVE_RA + 4)
#define SAVE_LO (SAVE_HI + 4)
#define SAVE_K0 (SAVE_LO + 4)
#define SAVE_K1 (SAVE_K0 + 4)
addr_000: addr_000:
// TLB refill // TLB refill
@ -105,7 +64,7 @@ start_idle: // 288
// TODO: save only fragile registers now, the rest on task switch. // TODO: save only fragile registers now, the rest on task switch.
kernel_exit: kernel_exit:
lw $k0, SAVE_PC($v0) lw $k0, SAVE_PC($v0)
mtc0 $k0, $EPC mtc0 $k0, $CP0_EPC
lw $k0, SAVE_LO($v0) lw $k0, SAVE_LO($v0)
lw $k1, SAVE_HI($v0) lw $k1, SAVE_HI($v0)
mtlo $k0 mtlo $k0
@ -190,7 +149,7 @@ save_regs:
mflo $v1 mflo $v1
sw $v0, SAVE_HI($k0) sw $v0, SAVE_HI($k0)
sw $v1, SAVE_LO($k0) sw $v1, SAVE_LO($k0)
mfc0 $k1, $EPC mfc0 $k1, $CP0_EPC
sw $k1, SAVE_PC($k0) sw $k1, SAVE_PC($k0)
lw $gp, -0xd84($zero) lw $gp, -0xd84($zero)

View File

@ -7,18 +7,17 @@
static void init_idle (): static void init_idle ():
// initialize idle task as if it is currently running. // initialize idle task as if it is currently running.
idle.prev_obj = NULL
idle.next_obj = NULL
idle.prev = NULL idle.prev = NULL
idle.next = NULL idle.next = NULL
idle.schedule_prev = NULL idle.schedule_prev = NULL
idle.schedule_next = NULL idle.schedule_next = NULL
idle.address_space = &idle_memory idle.address_space = &idle_memory
idle.refs = NULL
// initialize idle_memory. // initialize idle_memory.
idle_memory.prev_obj = NULL
idle_memory.next_obj = NULL
idle_memory.prev = NULL idle_memory.prev = NULL
idle_memory.next = NULL idle_memory.next = NULL
idle_memory.address_space = NULL
idle_memory.refs = NULL
idle_memory.pages = &idle_page idle_memory.pages = &idle_page
idle_memory.threads = &idle idle_memory.threads = &idle
idle_memory.memories = NULL idle_memory.memories = NULL
@ -27,11 +26,11 @@ static void init_idle ():
idle_memory.arch.directory = (unsigned **)0x80000000 idle_memory.arch.directory = (unsigned **)0x80000000
idle_memory.arch.asid = 0 idle_memory.arch.asid = 0
// initialize idle_page // initialize idle_page
idle_page.prev_obj = NULL
idle_page.next_obj = NULL
idle_page.prev = NULL idle_page.prev = NULL
idle_page.next = NULL idle_page.next = NULL
idle_page.physical = 0x80000000 idle_page.physical = 0x80000000
idle_page.refs = NULL
idle_page.address_space = NULL
current = &idle current = &idle
static void init_cp0 (): static void init_cp0 ():
@ -118,7 +117,8 @@ static void init_threads ():
panic (0x22446688, "unable to map initial page") panic (0x22446688, "unable to map initial page")
else: else:
for unsigned p = (shdr->sh_addr & PAGE_MASK); p <= ((shdr->sh_addr + shdr->sh_size - 1) & PAGE_MASK); p += PAGE_SIZE: for unsigned p = (shdr->sh_addr & PAGE_MASK); p <= ((shdr->sh_addr + shdr->sh_size - 1) & PAGE_MASK); p += PAGE_SIZE:
Page *page = mem->get_mapping (p) bool write = false
Page *page = mem->get_mapping (p, &write)
if !page: if !page:
page = mem->alloc_page () page = mem->alloc_page ()
if !page: if !page:
@ -127,6 +127,8 @@ static void init_threads ():
if !page->physical || !mem->map (page, p, true): if !page->physical || !mem->map (page, p, true):
panic (0x33557799, "unable to map initial bss page") panic (0x33557799, "unable to map initial bss page")
else: else:
if !write:
panic (0x20203030, "bss section starts on read-only page")
for unsigned a = p; a < p + PAGE_SIZE; a += 4: for unsigned a = p; a < p + PAGE_SIZE; a += 4:
if a >= shdr->sh_addr + shdr->sh_size: if a >= shdr->sh_addr + shdr->sh_size:
break break
@ -134,11 +136,10 @@ static void init_threads ():
continue continue
((unsigned *)page->physical)[(a & ~PAGE_MASK) >> 2] = 0 ((unsigned *)page->physical)[(a & ~PAGE_MASK) >> 2] = 0
for unsigned p = 0; p <= ((thread_start[i + 1] - thread_start[i] - 1) >> PAGE_BITS); ++p: for unsigned p = 0; p <= ((thread_start[i + 1] - thread_start[i] - 1) >> PAGE_BITS); ++p:
// TODO: this also skips pages where new space is allocated.
if pages[p]: if pages[p]:
continue continue
++top_memory.limit ++top_memory.limit
top_memory.zfree (thread_start[i] + (p << PAGE_BITS)) top_memory.pfree (thread_start[i] + (p << PAGE_BITS))
Page *stackpage = mem->alloc_page () Page *stackpage = mem->alloc_page ()
stackpage->physical = mem->zalloc () stackpage->physical = mem->zalloc ()
if !stackpage || !mem->map (stackpage, 0x7ffff000, true): if !stackpage || !mem->map (stackpage, 0x7ffff000, true):
@ -146,9 +147,10 @@ static void init_threads ():
thread->arch.a0 = (unsigned)mem->alloc_receiver () thread->arch.a0 = (unsigned)mem->alloc_receiver ()
thread->arch.a1 = (unsigned)&top_memory thread->arch.a1 = (unsigned)&top_memory
thread->arch.a2 = (unsigned)mem thread->arch.a2 = (unsigned)mem
Capability *admin = mem->alloc_capability ((Receiver *)(CAPTYPE_ADMIN | ~PAGE_MASK), &mem->capabilities, ~0) Capability *admin = mem->alloc_capability ((Receiver *)(CAPTYPE_ADMIN | ~PAGE_MASK), NULL, &mem->capabilities, ~0)
thread->arch.a3 = (unsigned)admin thread->arch.a3 = (unsigned)admin
mem->pfree ((unsigned)pages) mem->pfree ((unsigned)pages)
thread->flags = THREAD_FLAG_RUNNING
thread->schedule_next = NULL thread->schedule_next = NULL
thread->schedule_prev = previous thread->schedule_prev = previous
if previous: if previous:
@ -167,10 +169,10 @@ void init ():
runners = NULL runners = NULL
zero_pages = NULL zero_pages = NULL
// Fill junk pages with all memory not currently used. // Fill junk pages with all memory not currently used.
junk_pages = (FreePage *)(((unsigned)&_end + (1 << 12) - 1) & ~((1 << 12) - 1)) junk_pages = (FreePage *)(((unsigned)&_end + ~PAGE_MASK) & PAGE_MASK)
FreePage *p, *next FreePage *p, *next
unsigned count = 1 unsigned count = 1
for p = junk_pages, next = p; (unsigned)next - 0x80000000 < (1 << 27); p = next, next = (FreePage *)((unsigned)p + (1 << 12)): for p = junk_pages, next = p; (unsigned)next - 0x80000000 < (1 << 27); p = next, next = (FreePage *)((unsigned)p + ~PAGE_MASK + 1):
p->next = next p->next = next
++count ++count
p->next = NULL p->next = NULL
@ -179,11 +181,10 @@ void init ():
// initialize everything about the idle task. // initialize everything about the idle task.
init_idle () init_idle ()
// initialize top_memory. // initialize top_memory.
top_memory.prev_obj = NULL
top_memory.next_obj = NULL
top_memory.prev = NULL top_memory.prev = NULL
top_memory.next = NULL top_memory.next = NULL
top_memory.parent = NULL top_memory.address_space = NULL
top_memory.refs = NULL
top_memory.pages = NULL top_memory.pages = NULL
top_memory.threads = NULL top_memory.threads = NULL
top_memory.memories = NULL top_memory.memories = NULL
@ -191,7 +192,11 @@ void init ():
top_memory.used = 0 top_memory.used = 0
top_memory.arch.directory = NULL top_memory.arch.directory = NULL
top_memory.arch.asid = 0 top_memory.arch.asid = 0
for unsigned i = 0; i < 63; ++i:
asids[i] = i + 1
asids[63] = 0
init_threads () init_threads ()
// Enable all interrupts and say we're handling an exception. // Enable all interrupts and say we're handling an exception.

View File

@ -7,17 +7,18 @@ Thread *tlb_refill ():
//panic (0x88776655, "TLB refill") //panic (0x88776655, "TLB refill")
unsigned EntryHi unsigned EntryHi
cp0_get (CP0_ENTRY_HI, EntryHi) cp0_get (CP0_ENTRY_HI, EntryHi)
Page *page0 = current->address_space->get_mapping (EntryHi & ~(1 << 12)) bool write0 = false, write1 = false
Page *page1 = current->address_space->get_mapping (EntryHi | (1 << 12)) Page *page0 = current->address_space->get_mapping (EntryHi & ~(1 << 12), &write0)
Page *page1 = current->address_space->get_mapping (EntryHi | (1 << 12), &write1)
if (!(EntryHi & (1 << 12)) && !page0) || ((EntryHi & (1 << 12)) && !page1): if (!(EntryHi & (1 << 12)) && !page0) || ((EntryHi & (1 << 12)) && !page1):
panic (0x22222222, "no page mapped at requested address") panic (0x22222222, "no page mapped at requested address")
unsigned low0, low1 unsigned low0, low1
if page0: if page0:
low0 = (unsigned)page0->physical | 0x18 | 0x4 | 0x2 low0 = ((page0->physical & ~0x80000fff) >> 6) | 0x18 | (write0 ? 0x4 : 0) | 0x2
else else
low0 = 0 low0 = 0
if page1: if page1:
low1 = (unsigned)page1->physical | 0x18 | 0x4 | 0x2 low1 = ((page1->physical & ~0x80000fff) >> 6) | 0x18 | (write1 ? 0x4 : 0) | 0x2
else else
low1 = 0 low1 = 0
cp0_set (CP0_ENTRY_LO0, low0) cp0_set (CP0_ENTRY_LO0, low0)
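The new EntryLo values follow the standard MIPS32 layout: physical frame number in bits 6 and up, cache attribute in bits 3-5 (0x18 selects attribute 3, cacheable), D (writable) at 0x4 and V (valid) at 0x2; masking with ~0x80000fff strips both the KSEG0 base of page->physical and the page offset. A hedged worked example for a page whose KSEG0 address is 0x8123f000, mapped writable:

    // illustration of the EntryLo computation above
    unsigned physical = 0x8123f000;                   // KSEG0 alias of phys 0x0123f000
    unsigned low = ((physical & ~0x80000fff) >> 6)    // PFN field: 0x00048fc0
                 | 0x18                               // C = 3 (cacheable, noncoherent)
                 | 0x4                                // D = writable
                 | 0x2;                               // V = valid
    // low == 0x00048fde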
@ -27,7 +28,7 @@ Thread *tlb_refill ():
/// An interrupt which is not an exception has occurred. /// An interrupt which is not an exception has occurred.
Thread *interrupt (): Thread *interrupt ():
panic (0x88877722) panic (0x88877722, "Interrupt")
unsigned cause unsigned cause
cp0_get (CP0_CAUSE, cause) cp0_get (CP0_CAUSE, cause)
for unsigned i = 0; i < 8; ++i: for unsigned i = 0; i < 8; ++i:
@ -47,21 +48,15 @@ Thread *exception ():
cp0_get (CP0_CAUSE, cause) cp0_get (CP0_CAUSE, cause)
switch (cause >> 2) & 0x1f: switch (cause >> 2) & 0x1f:
case 0: case 0:
// Interrupt. // Interrupt. This shouldn't happen, since CAUSE[IV] == 1.
panic (0x11223344, "Interrupt.") panic (0x11223344, "Interrupt on exception vector.")
case 1: case 1:
// TLB modification. // TLB modification.
panic (0x21223344, "TLB modification.") panic (0x21223344, "TLB modification.")
case 2: case 2:
//unsigned a
//cp0_get (CP0_EPC, a)
//panic (a)
// TLB load or instruction fetch. // TLB load or instruction fetch.
panic (0x31223344, "TLB load or instruction fetch.") panic (0x31223344, "TLB load or instruction fetch.")
case 3: case 3:
unsigned a
cp0_get (CP0_EPC, a)
panic (a)
// TLB store. // TLB store.
panic (0x41223344, "TLB store.") panic (0x41223344, "TLB store.")
case 4: case 4:
@ -87,7 +82,6 @@ Thread *exception ():
panic (0x91223344, "Breakpoint.") panic (0x91223344, "Breakpoint.")
case 10: case 10:
// Reserved instruction. // Reserved instruction.
panic (*(unsigned *)0x004000b0)
panic (0xa1223344, "Reserved instruction.") panic (0xa1223344, "Reserved instruction.")
case 11: case 11:
// Coprocessor unusable. // Coprocessor unusable.

View File

@ -1,14 +1,283 @@
#pypp 0 #pypp 0
#include "kernel.hh" #include "kernel.hh"
Capability *Memory::find_capability (unsigned code): Capability *Memory::find_capability (unsigned code, bool *copy):
for Capability *c = capabilities; c; c = c->next: *copy = code & 2 ? true : false
if c == (Capability *)code: if code & 1:
return c // Cappage capability
unsigned num = (code & ~PAGE_MASK) >> 1
if num >= CAPPAGE_SIZE:
return NULL
Capability *page = (Capability *)(code & PAGE_MASK)
for Cappage *p = cappages; p; p = p->next:
if p->page == page:
return &page[num]
else:
// Normal capability
for Capability *c = capabilities; c; c = c->next:
if c == (Capability *)code:
return c
return NULL return NULL
void Capability::invoke (unsigned d0, unsigned d1, unsigned d2, unsigned d3, Capability *c0, Capability *c1, Capability *c2, Capability *c3): static Capability *reply
if (unsigned)target & PAGE_MASK:
// TODO: Create message in receiver. static void reply_cap (unsigned target, unsigned protected_data):
return Capability r
// TODO: Handle kernel request. Capability **ref
if target & ~KERNEL_MASK:
ref = &((Receiver *)target)->capabilities
else:
ref = &((Object_base *)protected_data)->refs
// alloc_capability needs a Memory, but it isn't used if return storage is given.
top_memory.alloc_capability ((Receiver *)target, NULL, ref, protected_data, &r)
unsigned d[4] = { 0, 0, 0, 0 }
Capability *caps[4] = { &r, NULL, NULL, NULL }
bool cops[4] = { true, false, false, false }
reply->invoke (d, caps, cops)
static void reply_cap (Capability *cap, bool copy):
unsigned d[4] = { 0, 0, 0, 0 }
Capability *caps[4] = { cap, NULL, NULL, NULL }
bool cops[4] = { copy, false, false, false }
reply->invoke (d, caps, cops)
static void reply_num (unsigned num):
unsigned d[4] = { num, 0, 0, 0 }
Capability *caps[4] = { NULL, NULL, NULL, NULL }
bool cops[4] = { false, false, false, false }
reply->invoke (d, caps, cops)
static void admin_invoke (unsigned target, Capability *cap, unsigned request, unsigned data):
switch request:
case CAP_ADMIN_SCHEDULE:
schedule ()
break
default:
break
static void receiver_invoke (unsigned target, unsigned protected_data, Capability *cap, unsigned request, unsigned data):
Receiver *receiver = (Receiver *)protected_data
switch request:
case CAP_RECEIVER_SET_OWNER:
if ((unsigned)cap->target & (CAPTYPE_MASK | ~KERNEL_MASK)) != CAPTYPE_THREAD:
// FIXME: This makes it impossible to use a fake thread capability.
return
receiver->own ((Thread *)cap->protected_data)
break
case CAP_RECEIVER_CREATE_CAPABILITY:
reply_cap ((unsigned)receiver, data)
break
case CAP_RECEIVER_CREATE_CALL_CAPABILITY:
reply_cap (CAPTYPE_RECEIVER | CAP_RECEIVER_CALL, protected_data)
break
default:
break
static void memory_invoke (unsigned target, unsigned protected_data, Capability *cap, unsigned request, unsigned data):
Memory *mem = (Memory *)protected_data
switch request:
case CAP_MEMORY_CREATE:
switch data:
case CAPTYPE_RECEIVER:
Receiver *ret = mem->alloc_receiver ()
if ret:
reply_cap (data | REQUEST_MASK, (unsigned)ret)
else:
reply_num (0)
break
case CAPTYPE_MEMORY:
Memory *ret = mem->alloc_memory ()
if ret:
reply_cap (data | REQUEST_MASK, (unsigned)ret)
else:
reply_num (0)
break
case CAPTYPE_THREAD:
Thread *ret = mem->alloc_thread ()
if ret:
reply_cap (data | REQUEST_MASK, (unsigned)ret)
else:
reply_num (0)
break
case CAPTYPE_PAGE:
Page *ret = mem->alloc_page ()
if ret:
reply_cap (data | REQUEST_MASK, (unsigned)ret)
else:
reply_num (0)
break
case CAPTYPE_CAPPAGE:
Cappage *ret = mem->alloc_cappage ()
if ret:
reply_cap (data | REQUEST_MASK, (unsigned)ret)
else:
reply_num (0)
break
default:
return
break
case CAP_MEMORY_DESTROY:
// TODO
break
case CAP_MEMORY_LIST:
// TODO
break
case CAP_MEMORY_MAPPING:
bool write
Page *page = mem->get_mapping (data, &write)
unsigned t = CAPTYPE_PAGE | REQUEST_MASK
if !write:
t &= ~CAP_PAGE_WRITE
reply_cap (t, (unsigned)page)
break
case CAP_MEMORY_SET_LIMIT:
mem->limit = data
break
case CAP_MEMORY_GET_LIMIT:
reply_num (mem->limit)
break
case CAP_MEMORY_DROP:
if cap->address_space != mem:
break
mem->free_capability (cap)
break
default:
break
static void thread_invoke (unsigned target, unsigned protected_data, Capability *cap, unsigned request, unsigned data):
Thread *thread = (Thread *)protected_data
switch request:
case CAP_THREAD_RUN:
if data:
thread->run ()
else:
thread->unrun ()
break
case CAP_THREAD_GET_INFO:
// TODO
case CAP_THREAD_SET_INFO:
// TODO
default:
break
static void page_invoke (unsigned target, unsigned protected_data, Capability *cap, unsigned request, unsigned data):
Page *page
Cappage *cappage
if (target & CAPTYPE_MASK) == CAPTYPE_PAGE:
page = (Page *)protected_data
cappage = NULL
else:
page = NULL
cappage = (Cappage *)protected_data
switch request:
case CAP_PAGE_MAP:
if !page:
return
// TODO
case CAP_PAGE_SHARE:
// TODO
case CAP_PAGE_SHARE_COW:
// TODO
case CAP_PAGE_FORGET:
// TODO
case CAP_CAPPAGE_SET:
if !cappage:
return
// TODO
default:
break
static void capability_invoke (unsigned target, unsigned protected_data, Capability *cap, unsigned request, unsigned data):
Capability *capability = (Capability *)protected_data
switch request:
case CAP_CAPABILITY_GET:
reply_cap (capability, true)
break
case CAP_CAPABILITY_SET_DEATH_NOTIFY:
// TODO
default:
break
static bool kernel_invoke (unsigned target, unsigned protected_data, unsigned d[4], Capability *c[4], bool copy[4]):
// Kernel calling convention:
// data[0] is the request.
// cap[0] is the reply capability
// other parameters' meanings depend on the operation.
if !((1 << d[0]) & target & ~REQUEST_MASK):
// You are not allowed to perform this operation.
return false
reply = c[0]
if d[0] == CAP_DEGRADE:
reply_cap (target & d[1], protected_data)
return true
switch target & CAPTYPE_MASK:
case CAPTYPE_ADMIN:
admin_invoke (target, c[1], d[0], d[1])
break
case CAPTYPE_RECEIVER:
if target & CAP_RECEIVER_CALL:
// This is a call capability.
// TODO
return false
receiver_invoke (target, protected_data, c[1], d[0], d[1])
break
case CAPTYPE_MEMORY:
memory_invoke (target, protected_data, c[1], d[0], d[1])
break
case CAPTYPE_THREAD:
thread_invoke (target, protected_data, c[1], d[0], d[1])
break
case CAPTYPE_PAGE:
page_invoke (target, protected_data, c[1], d[0], d[1])
break
case CAPTYPE_CAPABILITY:
capability_invoke (target, protected_data, c[1], d[0], d[1])
break
case CAPTYPE_CAPPAGE:
page_invoke (target, protected_data, c[1], d[0], d[1])
break
default:
panic (0x99337744, "invalid capability type invoked")
return true
bool Capability::invoke (unsigned data[4], Capability *cap[4], bool copy[4]):
if (unsigned)target & ~KERNEL_MASK:
// This is not a kernel capability: send a message to the receiver.
bool tried_direct = false
if target->owner && target->owner->is_waiting ():
Capability *c[4]
for unsigned i = 0; i < 4; ++i:
if !cap[i]:
c[i] = NULL
else:
c[i] = target->owner->address_space->clone_capability (cap[i], copy[i])
if !c[i]:
for unsigned j = 0; j < i; ++j:
target->owner->address_space->free_capability (c[i])
tried_direct = true
break
if !tried_direct:
Thread_arch_receive (target->owner, data, c)
target->owner->unwait ()
return true
// The owner was not waiting, or it was not possible to deliver the message. Put it in the queue.
Message *msg = target->address_space->alloc_message (this)
if !msg:
return false
for unsigned i = 0; i < 4; ++i:
msg->data[i] = data[i]
if !cap[i]:
msg->capabilities[i] = NULL
else:
msg->capabilities[i] = target->address_space->clone_capability (cap[i], copy[i])
if !msg->capabilities[i]:
for unsigned j = 0; j < i; ++j:
target->address_space->free_capability (msg->capabilities[j])
target->address_space->free_message (msg)
return false
if tried_direct:
Thread_arch_receive_fail (target->owner)
target->owner->unwait ()
return true
// This is a kernel capability. Use a function to allow optimized call capabilities.
return kernel_invoke ((unsigned)target, protected_data, data, cap, copy)

View File

@ -2,6 +2,7 @@
#ifndef _KERNEL_HH #ifndef _KERNEL_HH
#define _KERNEL_HH #define _KERNEL_HH
#define __KERNEL
#include "boot-programs/sos.h" #include "boot-programs/sos.h"
#ifndef EXTERN #ifndef EXTERN
@ -16,14 +17,14 @@ struct Thread
struct Message struct Message
struct Receiver struct Receiver
struct Capability struct Capability
struct Cappage
struct Memory struct Memory
#include "arch.hh" #include "arch.hh"
struct Object_base: struct Object_base:
// Next and previous object of any type in the same page. Capability *refs
Object_base *prev_obj, *next_obj Memory *address_space
void free_obj (Memory *parent)
inline bool is_free () inline bool is_free ()
template <typename _T> // template <typename _T> //
@ -43,11 +44,17 @@ struct Page : public Object <Page>:
unsigned physical unsigned physical
struct Thread : public Object <Thread>: struct Thread : public Object <Thread>:
Memory *address_space Receiver *receivers
unsigned pc, sp unsigned pc, sp
Thread_arch arch Thread_arch arch
unsigned flags
Thread *schedule_prev, *schedule_next Thread *schedule_prev, *schedule_next
Receiver *receivers void run ()
void unrun ()
void wait ()
void unwait ()
bool is_waiting ():
return flags & THREAD_FLAG_WAITING
struct Message : public Object <Message>: struct Message : public Object <Message>:
Capability *capabilities[4] Capability *capabilities[4]
@ -59,29 +66,35 @@ struct Receiver : public Object <Receiver>:
Receiver *prev_owned, *next_owned Receiver *prev_owned, *next_owned
Capability *capabilities Capability *capabilities
Message *messages Message *messages
void own (Thread *o)
void orphan ()
struct Capability : public Object <Capability>: struct Capability : public Object <Capability>:
Receiver *target Receiver *target
Capability *parent
Capability *children Capability *children
Capability *sibling_prev, *sibling_next Capability *sibling_prev, *sibling_next
unsigned protected_data unsigned protected_data
void invoke (unsigned d0, unsigned d1, unsigned d2, unsigned d3, Capability *c0, Capability *c1, Capability *c2, Capability *c3) bool invoke (unsigned data[4], Capability *cap[4], bool copy[4])
void invalidate () void invalidate ()
struct Cappage : public Object <Cappage>:
Capability *page
struct Memory : public Object <Memory>: struct Memory : public Object <Memory>:
Memory *parent
Free *frees Free *frees
Page *pages Page *pages
Thread *threads Thread *threads
Receiver *receivers Receiver *receivers
Capability *capabilities Capability *capabilities
Cappage *cappages
Memory *memories Memory *memories
unsigned limit, used unsigned limit, used
Memory_arch arch Memory_arch arch
inline bool map (Page *page, unsigned address, bool write) inline bool map (Page *page, unsigned address, bool write)
inline void unmap (Page *page, unsigned address) inline void unmap (Page *page, unsigned address)
inline Page *get_mapping (unsigned address) inline Page *get_mapping (unsigned address, bool *writable)
// Allocation of pages. // Allocation of pages.
bool use () bool use ()
@ -97,7 +110,9 @@ struct Memory : public Object <Memory>:
Thread *alloc_thread () Thread *alloc_thread ()
Message *alloc_message (Capability *source) Message *alloc_message (Capability *source)
Receiver *alloc_receiver () Receiver *alloc_receiver ()
Capability *alloc_capability (Receiver *target, Capability **parent, unsigned protected_data) Capability *alloc_capability (Receiver *target, Capability *parent, Capability **parent_ptr, unsigned protected_data, Capability *ret = NULL)
Capability *clone_capability (Capability *source, bool copy, Capability *ret = NULL)
Cappage *alloc_cappage ()
Memory *alloc_memory () Memory *alloc_memory ()
void free_page (Page *page) void free_page (Page *page)
@ -105,17 +120,21 @@ struct Memory : public Object <Memory>:
void free_message (Message *message) void free_message (Message *message)
void free_receiver (Receiver *receiver) void free_receiver (Receiver *receiver)
void free_capability (Capability *capability) void free_capability (Capability *capability)
void free_cappage (Cappage *page)
void free_memory (Memory *mem) void free_memory (Memory *mem)
Capability *find_capability (unsigned code) void free_obj (Object_base *obj)
Capability *find_capability (unsigned code, bool *copy)
// Functions which can be called from assembly must not be mangled. // Functions which can be called from assembly must not be mangled.
extern "C": extern "C":
// Panic. n is sent over caps led. message is currently ignored. // Panic. n is sent over caps led. message is currently ignored.
void panic (unsigned n, char const *message = "") void panic (unsigned n, char const *message = "")
// Debug: switch caps led // Debug: switch caps led
void led (bool one, bool two, bool three) void dbg_led (bool one, bool two, bool three)
void dbg_sleep (unsigned ms) void dbg_sleep (unsigned ms)
void dbg_send (unsigned code, unsigned bits = 32)
void schedule () void schedule ()
@ -133,19 +152,20 @@ EXTERN Thread *current
// Defined in arch.cc // Defined in arch.cc
void Thread_arch_init (Thread *thread) void Thread_arch_init (Thread *thread)
void Thread_arch_receive (Thread *thread, unsigned d[4], Capability *c[4])
void Thread_arch_receive_fail (Thread *thread)
void Memory_arch_init (Memory *mem) void Memory_arch_init (Memory *mem)
void Memory_arch_free (Memory *mem) void Memory_arch_free (Memory *mem)
bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write) bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write)
void Memory_arch_unmap (Memory *mem, Page *page, unsigned address) void Memory_arch_unmap (Memory *mem, Page *page, unsigned address)
Page *Memory_arch_get_mapping (Memory *mem, unsigned address) Page *Memory_arch_get_mapping (Memory *mem, unsigned address, bool *writable)
void arch_invoke () void arch_invoke ()
void arch_schedule (Thread *previous, Thread *target)
bool Memory::map (Page *page, unsigned address, bool write): bool Memory::map (Page *page, unsigned address, bool write):
return Memory_arch_map (this, page, address, write) return Memory_arch_map (this, page, address, write)
void Memory::unmap (Page *page, unsigned address): void Memory::unmap (Page *page, unsigned address):
Memory_arch_unmap (this, page, address) Memory_arch_unmap (this, page, address)
Page *Memory::get_mapping (unsigned address): Page *Memory::get_mapping (unsigned address, bool *writable):
return Memory_arch_get_mapping (this, address) return Memory_arch_get_mapping (this, address, writable)
#endif #endif

View File

@ -28,13 +28,35 @@ void Thread_arch_init (Thread *thread):
thread->arch.k0 = 0 thread->arch.k0 = 0
thread->arch.k1 = 0 thread->arch.k1 = 0
void Thread_arch_receive (Thread *thread, unsigned d[4], Capability *c[4]):
thread->arch.a0 = (unsigned)c[0]
thread->arch.a1 = (unsigned)c[1]
thread->arch.a2 = (unsigned)c[2]
thread->arch.a3 = (unsigned)c[3]
thread->arch.t0 = d[0]
thread->arch.t1 = d[1]
thread->arch.t2 = d[2]
thread->arch.t3 = d[3]
thread->arch.v0 = 1
void Thread_arch_receive_fail (Thread *thread):
thread->arch.v0 = 0
void Memory_arch_init (Memory *mem): void Memory_arch_init (Memory *mem):
++g_asid mem->arch.asid = 1
if g_asid > 0x3f:
g_asid = 1
mem->arch.asid = g_asid
mem->arch.directory = NULL mem->arch.directory = NULL
static void flush_tlb (unsigned asid):
for unsigned tlb = 1; tlb < 32; ++tlb:
cp0_set (CP0_INDEX, tlb)
__asm__ volatile ("tlbr")
unsigned hi
cp0_get (CP0_ENTRY_HI, hi)
if (hi & 0x1f) == asid:
// Set asid to 0, which is only used by the idle task.
cp0_set (CP0_ENTRY_HI, 0x2000 * tlb)
__asm__ volatile ("tlbwi")
void Memory_arch_free (Memory *mem): void Memory_arch_free (Memory *mem):
if !mem->arch.directory: if !mem->arch.directory:
return return
@ -50,6 +72,10 @@ void Memory_arch_free (Memory *mem):
mem->unuse () mem->unuse ()
mem->zfree ((unsigned)table) mem->zfree ((unsigned)table)
mem->arch.directory[i] = NULL mem->arch.directory[i] = NULL
if (Memory *)asids[mem->arch.asid] == mem:
flush_tlb (mem->arch.asid)
asids[mem->arch.asid] = asids[0]
asids[0] = mem->arch.asid
mem->unuse () mem->unuse ()
mem->zfree ((unsigned)mem->arch.directory) mem->zfree ((unsigned)mem->arch.directory)
@ -58,42 +84,66 @@ bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write):
mem->arch.directory = (unsigned **)mem->zalloc () mem->arch.directory = (unsigned **)mem->zalloc ()
if !mem->arch.directory: if !mem->arch.directory:
return false return false
unsigned *table = mem->arch.directory[(unsigned)address >> 22] unsigned *table = mem->arch.directory[address >> 22]
if !table: if !table:
table = (unsigned *)mem->zalloc () table = (unsigned *)mem->zalloc ()
if !table: if !table:
return false return false
mem->arch.directory[(unsigned)address >> 22] = table mem->arch.directory[address >> 22] = table
unsigned idx = ((unsigned)address >> 12) & ((1 << 10) - 1) unsigned idx = (address >> 12) & ((1 << 10) - 1)
if table[idx]: if table[idx]:
mem->unmap ((Page *)(table[idx] & ~3), address) mem->unmap ((Page *)(table[idx] & ~3), address)
table[idx] = write ? (unsigned)page : (unsigned)page + 1 table[idx] = write ? (unsigned)page : (unsigned)page + 1
return true return true
void Memory_arch_unmap (Memory *mem, Page *page, unsigned address): void Memory_arch_unmap (Memory *mem, Page *page, unsigned address):
unsigned *table = mem->arch.directory[(unsigned)address >> 22] unsigned *table = mem->arch.directory[address >> 22]
table[((unsigned)address >> 12) & ((1 << 10) - 1)] = 0 table[(address >> 12) & ((1 << 10) - 1)] = 0
Page *Memory_arch_get_mapping (Memory *mem, unsigned address): Page *Memory_arch_get_mapping (Memory *mem, unsigned address, bool *writable):
unsigned *table = mem->arch.directory[(unsigned)address >> 22] unsigned *table = mem->arch.directory[address >> 22]
unsigned v = table[((unsigned)address >> 12) & ((1 << 10) - 1)] unsigned v = table[(address >> 12) & ((1 << 10) - 1)]
if writable:
*writable = !(v & 1)
return (Page *)(v & ~1) return (Page *)(v & ~1)
void arch_invoke (): void arch_invoke ():
Capability *target, *c0, *c1, *c2, *c3 Capability *target, *c[4]
target = current->address_space->find_capability (current->arch.v0) bool wait, copy[4]
Thread *caller = current
target = caller->address_space->find_capability (current->arch.v0, &wait)
if wait:
caller->wait ()
if !target: if !target:
// TODO: there must be no action here. This is just because the rest doesn't work yet. // TODO: there must be no action here. This is just because the rest doesn't work yet.
led (current->arch.a0, current->arch.a1, current->arch.a2) dbg_led (caller->arch.a0, caller->arch.a1, caller->arch.a2)
dbg_sleep (1000) dbg_sleep (1000)
schedule () schedule ()
// Calling an invalid capability always fails.
caller->arch.v0 = 0
return return
c0 = current->address_space->find_capability (current->arch.a0) c[0] = caller->address_space->find_capability (caller->arch.a0, &copy[0])
c1 = current->address_space->find_capability (current->arch.a1) c[1] = caller->address_space->find_capability (caller->arch.a1, &copy[1])
c2 = current->address_space->find_capability (current->arch.a2) c[2] = caller->address_space->find_capability (caller->arch.a2, &copy[2])
c3 = current->address_space->find_capability (current->arch.a3) c[3] = caller->address_space->find_capability (caller->arch.a3, &copy[3])
target->invoke (current->arch.t0, current->arch.t1, current->arch.t2, current->arch.t3, c0, c1, c2, c3) unsigned d[4]
d[0] = caller->arch.t0
void arch_schedule (Thread *previous, Thread *target): d[1] = caller->arch.t1
cp0_set (CP0_ENTRY_HI, target->address_space->arch.asid) d[2] = caller->arch.t2
// TODO: flush TLB if the asid is already taken. d[3] = caller->arch.t3
caller->arch.v0 = target->invoke (d, c, copy) ? 1 : 0
if caller != current:
if (Memory *)asids[current->address_space->arch.asid] != current->address_space:
if asids[0]:
current->address_space->arch.asid = asids[0]
asids[0] = asids[asids[0]]
else:
static unsigned random = 1
current->address_space->arch.asid = random
// Overwrite used asid, so flush those values from tlb.
flush_tlb (random)
++random
if random >= 64:
random = 1
asids[current->address_space->arch.asid] = (unsigned)current
cp0_set (CP0_ENTRY_HI, current->address_space->arch.asid)

View File

@ -48,6 +48,44 @@
#define PAGE_SIZE (1 << PAGE_BITS) #define PAGE_SIZE (1 << PAGE_BITS)
#define PAGE_MASK (~(PAGE_SIZE - 1)) #define PAGE_MASK (~(PAGE_SIZE - 1))
// register save positions in Thread
#define SAVE_PC (5 * 4)
#define SAVE_SP (SAVE_PC + 4)
#define SAVE_AT (SAVE_SP + 4)
#define SAVE_V0 (SAVE_AT + 4)
#define SAVE_V1 (SAVE_V0 + 4)
#define SAVE_A0 (SAVE_V1 + 4)
#define SAVE_A1 (SAVE_A0 + 4)
#define SAVE_A2 (SAVE_A1 + 4)
#define SAVE_A3 (SAVE_A2 + 4)
#define SAVE_T0 (SAVE_A3 + 4)
#define SAVE_T1 (SAVE_T0 + 4)
#define SAVE_T2 (SAVE_T1 + 4)
#define SAVE_T3 (SAVE_T2 + 4)
#define SAVE_T4 (SAVE_T3 + 4)
#define SAVE_T5 (SAVE_T4 + 4)
#define SAVE_T6 (SAVE_T5 + 4)
#define SAVE_T7 (SAVE_T6 + 4)
#define SAVE_T8 (SAVE_T7 + 4)
#define SAVE_T9 (SAVE_T8 + 4)
#define SAVE_S0 (SAVE_T9 + 4)
#define SAVE_S1 (SAVE_S0 + 4)
#define SAVE_S2 (SAVE_S1 + 4)
#define SAVE_S3 (SAVE_S2 + 4)
#define SAVE_S4 (SAVE_S3 + 4)
#define SAVE_S5 (SAVE_S4 + 4)
#define SAVE_S6 (SAVE_S5 + 4)
#define SAVE_S7 (SAVE_S6 + 4)
#define SAVE_GP (SAVE_S7 + 4)
#define SAVE_FP (SAVE_GP + 4)
#define SAVE_RA (SAVE_FP + 4)
#define SAVE_HI (SAVE_RA + 4)
#define SAVE_LO (SAVE_HI + 4)
#define SAVE_K0 (SAVE_LO + 4)
#define SAVE_K1 (SAVE_K0 + 4)
#ifndef ASM
struct Thread_arch: struct Thread_arch:
unsigned at, v0, v1, a0, a1, a2, a3 unsigned at, v0, v1, a0, a1, a2, a3
unsigned t0, t1, t2, t3, t4, t5, t6, t7, t8, t9 unsigned t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
@ -58,7 +96,9 @@ struct Memory_arch:
unsigned asid unsigned asid
unsigned **directory unsigned **directory
EXTERN unsigned g_asid // Pointers to Memory when asid is taken, index of next free, or 0, if free.
// asid[0] is used as index to first free asid.
EXTERN unsigned asids[64]
// Functions which can be called from assembly must not be mangled. // Functions which can be called from assembly must not be mangled.
extern "C": extern "C":
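The asids[] table doubles as a free list, exactly as the comment above says: entry 0 holds the index of the first free ASID, each free entry holds the index of the next free one, and a taken entry holds the owning Memory. A standalone sketch of that bookkeeping (the kernel additionally steals a pseudo-random ASID, flushing the TLB, when the list runs empty):

    // hedged sketch of the asid free list; not the kernel's exact code
    unsigned asids[64];

    void asid_init (void)
    {
        for (unsigned i = 0; i < 63; ++i)
            asids[i] = i + 1;          // each free slot points at the next free one
        asids[63] = 0;                 // end of list; asid 0 itself is used by the idle task
    }

    unsigned asid_alloc (Memory *mem)
    {
        unsigned a = asids[0];         // first free asid, or 0 if none are left
        if (a)
        {
            asids[0] = asids[a];       // pop it off the free list
            asids[a] = (unsigned)mem;  // mark it as taken by this address space
        }
        return a;
    }

    void asid_free (unsigned a)
    {
        asids[a] = asids[0];           // push it back on the free list
        asids[0] = a;
    }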
@ -80,4 +120,6 @@ extern "C":
extern unsigned thread_start[NUM_THREADS + 1] extern unsigned thread_start[NUM_THREADS + 1]
#endif #endif
#endif // defined ASM
#endif #endif

View File

@ -3,8 +3,4 @@
void panic (unsigned n, char const *message): void panic (unsigned n, char const *message):
while (1): while (1):
for unsigned bit = 0x80000000; bit; bit >>= 1: dbg_send (n)
for int i = 0; i < 600000; ++i:
led (n & bit, i > 200000 && i < 400000, false)
for int i = 0; i < 1000000; ++i:
led (false, false, true)

View File

@ -1,12 +1,51 @@
#pypp 0 #pypp 0
#include "kernel.hh" #include "kernel.hh"
void Thread::run ():
if flags & THREAD_FLAG_RUNNING:
return
flags |= THREAD_FLAG_RUNNING
if flags & THREAD_FLAG_WAITING:
return
schedule_next = first_scheduled
if schedule_next:
schedule_next->schedule_prev = this
first_scheduled = this
void Thread::unrun ():
if !(flags & THREAD_FLAG_RUNNING):
return
flags &= ~THREAD_FLAG_RUNNING
if !(flags & THREAD_FLAG_WAITING):
if current == this:
current = schedule_next
if schedule_prev:
schedule_prev->schedule_next = schedule_next
else:
first_scheduled = schedule_next
if schedule_next:
schedule_next->schedule_prev = schedule_prev
void Thread::wait ():
if flags & THREAD_FLAG_WAITING:
return
if flags & THREAD_FLAG_RUNNING:
unrun ()
flags |= THREAD_FLAG_WAITING
void Thread::unwait ():
if !(flags & THREAD_FLAG_WAITING):
return
flags &= ~THREAD_FLAG_WAITING
if flags & THREAD_FLAG_RUNNING:
flags &= ~THREAD_FLAG_RUNNING
run ()
void schedule (): void schedule ():
Thread *old = current Thread *old = current
current = current->schedule_next if current:
current = current->schedule_next
if !current: if !current:
current = first_scheduled current = first_scheduled
if !current: if !current:
current = &idle current = &idle
if old != current:
arch_schedule (old, current)

View File

@ -58,7 +58,7 @@ static void __gpio_clear_pin (unsigned n):
#define NETWORK_IO 9 #define NETWORK_IO 9
#define LIGHT 105 #define LIGHT 105
void led (bool one, bool two, bool three): void dbg_led (bool one, bool two, bool three):
__gpio_as_output (CAPSLOCKLED_IO) __gpio_as_output (CAPSLOCKLED_IO)
__gpio_as_output (NUMLOCKLED_IO) __gpio_as_output (NUMLOCKLED_IO)
__gpio_as_output (NETWORK_IO) __gpio_as_output (NETWORK_IO)
@ -76,5 +76,21 @@ void led (bool one, bool two, bool three):
__gpio_set_pin (NETWORK_IO) __gpio_set_pin (NETWORK_IO)
void dbg_sleep (unsigned ms): void dbg_sleep (unsigned ms):
for unsigned i = 0; i < 10000 * ms; ++i: for unsigned i = 0; i < 2673 * ms; ++i:
__gpio_as_output (CAPSLOCKLED_IO) __gpio_as_output (CAPSLOCKLED_IO)
void dbg_send (unsigned code, unsigned bits):
for int i = bits; i >= 0; --i:
bool on = code & (1 << i)
dbg_led (false, false, false)
dbg_sleep (200)
if on:
dbg_led (true, false, false)
else:
dbg_led (false, true, false)
dbg_sleep (400)
dbg_led (false, false, false)
dbg_sleep (200)
dbg_led (true, true, false)
dbg_sleep (200)
dbg_led (false, false, false)