iris/invoke.ccp

#pypp 0
// Iris: micro-kernel for a capability-based operating system.
// invoke.ccp: Capability invocation and kernel responses.
// Copyright 2009 Bas Wijnen <wijnen@debian.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
#include "kernel.hh"
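// Translate a user-supplied capability code into a Capability pointer. Bit 1 of the code
// requests copy semantics; bit 0 marks a cappage entry, where the page-aligned part of
// the code is the cappage frame and the remaining low bits select the slot. Otherwise
// the code is a pointer into this Memory's capability list. NULL means no match.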
Capability *Memory::find_capability (unsigned code, bool *copy):
*copy = code & 2 ? true : false
if code & 1:
// Cappage capability
unsigned num = (code & ~PAGE_MASK) >> 1
if num >= CAPPAGE_SIZE:
return NULL
Capability *page = (Capability *)(code & PAGE_MASK)
for Cappage *p = cappages; p; p = p->next:
if p->data.frame == (unsigned)page:
return &page[num]
else:
// Normal capability
for Capability *c = capabilities; c; c = c->next:
if c == (Capability *)code:
return c
return NULL
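// Try to hand a queued message to the owning thread. This only succeeds if the owner
// exists and is waiting; when protected_only is set, only a message carrying
// reply_protected_data may be delivered. Returns true if a message was handed over.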
bool Receiver::try_deliver ():
if !messages || !owner || !owner->is_waiting ():
return false
Message *m = messages
if protected_only:
for ; m; m = m->next:
if m->protected_data == reply_protected_data:
break
if !m:
return false
Capability::Context c
for unsigned i = 0; i < 4; ++i:
c.data[i] = m->data[i]
if m->capabilities[i]:
c.cap[i] = owner->address_space->clone_capability (m->capabilities[i], true)
if !c.cap[i]:
for unsigned j = 0; j < i; ++j:
owner->address_space->free_capability (c.cap[j])
return false
Thread_arch_receive (owner, m->protected_data, &c)
owner->unwait ()
return true
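// Send a message to this receiver. If the owner is waiting (and protected_only allows
// it), try direct delivery; otherwise, or when cloning the capabilities into the
// owner's address space fails, queue the message. Returns false only if queueing
// fails as well.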
bool Receiver::send_message (unsigned protected_data, Capability::Context *c):
bool tried_direct = false
if owner && owner->is_waiting () && (protected_data == reply_protected_data || !protected_only):
for unsigned i = 0; i < 4; ++i:
if c->cap[i]:
c->cap[i] = owner->address_space->clone_capability (c->cap[i], c->copy[i])
if !c->cap[i]:
for unsigned j = 0; j < i; ++j:
owner->address_space->free_capability (c->cap[j])
tried_direct = true
break
if !tried_direct:
Thread_arch_receive (owner, protected_data, c)
owner->unwait ()
return true
// The owner was not waiting, or it was not possible to deliver the message. Put it in the queue.
Message *msg = address_space->alloc_message (this, protected_data)
if !msg:
return false
for unsigned i = 0; i < 4; ++i:
msg->data[i] = c->data[i]
if !c->cap[i]:
msg->capabilities[i] = NULL
else:
msg->capabilities[i] = address_space->clone_capability (c->cap[i], c->copy[i])
if !msg->capabilities[i]:
for unsigned j = 0; j < i; ++j:
address_space->free_capability (msg->capabilities[j])
address_space->free_message (msg)
return false
if tried_direct:
Thread_arch_receive_fail (owner)
owner->unwait ()
return true
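// Per-invocation reply state, set up by kernel_invoke (): either an explicit reply
// capability, or the receiver behind a call capability. The reply_* helpers below send
// the kernel's answer through whichever of the two is set.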
static Capability *reply
static Receiver *reply_receiver
static void fill_cap (Capability *r, unsigned target, unsigned protected_data):
Capability **ref
if target & ~KERNEL_MASK:
ref = &((Receiver *)target)->capabilities
else:
ref = &((Object_base *)protected_data)->refs
// alloc_capability needs a Memory, but it isn't used if return storage is given.
top_memory.alloc_capability ((Receiver *)target, NULL, ref, protected_data, r)
static void reply_cap (unsigned target, unsigned protected_data):
Capability r
fill_cap (&r, target, protected_data)
Capability::Context c
for unsigned i = 0; i < 4; ++i:
c.data[i] = 0
c.cap[0] = &r
c.copy[0] = true
for unsigned i = 1; i < 4; ++i:
c.cap[i] = NULL
c.copy[i] = false
if reply:
reply->invoke (&c)
else if reply_receiver:
reply_receiver->send_message (reply_receiver->reply_protected_data, &c)
r.invalidate ()
static void reply_cap (Capability *cap, bool copy):
Capability::Context c
for unsigned i = 0; i < 4; ++i:
c.data[i] = 0
c.cap[0] = cap
c.copy[0] = copy
for unsigned i = 1; i < 4; ++i:
c.cap[i] = NULL
c.copy[i] = false
if reply:
reply->invoke (&c)
else if reply_receiver:
reply_receiver->send_message (reply_receiver->reply_protected_data, &c)
static void reply_num (unsigned num):
Capability::Context c
c.data[0] = num
for unsigned i = 1; i < 4; ++i:
c.data[i] = 0
for unsigned i = 0; i < 4; ++i:
c.cap[i] = NULL
c.copy[i] = false
if reply:
reply->invoke (&c)
else if reply_receiver:
reply_receiver->send_message (reply_receiver->reply_protected_data, &c)
static void reply_nums (unsigned num1, unsigned num2):
Capability::Context c
c.data[0] = num1
c.data[1] = num2
c.data[2] = 0
c.data[3] = 0
for unsigned i = 0; i < 4; ++i:
c.cap[i] = NULL
c.copy[i] = false
if reply:
reply->invoke (&c)
else if reply_receiver:
reply_receiver->send_message (reply_receiver->reply_protected_data, &c)
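// The *_invoke functions below implement the kernel objects' operations. They are
// dispatched from kernel_invoke () on the capability type; data[0] selects the
// operation within each object.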
static void receiver_invoke (unsigned target, unsigned protected_data, Capability::Context *c):
Receiver *receiver = (Receiver *)protected_data
switch c->data[0]:
case CAP_RECEIVER_SET_OWNER:
if !c->cap[0] || ((unsigned)c->cap[0]->target & (CAPTYPE_MASK | ~KERNEL_MASK)) != CAPTYPE_THREAD:
// FIXME: This makes it impossible to use a fake Thread capability.
return
receiver->own ((Thread *)c->cap[0]->protected_data)
break
case CAP_RECEIVER_CREATE_CAPABILITY:
reply_cap ((unsigned)receiver, c->data[1])
break
case CAP_RECEIVER_CREATE_CALL_CAPABILITY:
reply_cap (CAPTYPE_RECEIVER | (1 << CAP_RECEIVER_CALL) | (c->data[1] ? 1 << CAP_RECEIVER_CALL_ASYNC : 0), protected_data)
break
case CAP_RECEIVER_GET_REPLY_PROTECTED_DATA:
reply_nums (receiver->reply_protected_data, receiver->protected_only ? 1 : 0)
break
case CAP_RECEIVER_SET_REPLY_PROTECTED_DATA:
receiver->reply_protected_data = c->data[1]
receiver->protected_only = c->data[2]
break
default:
break
static void memory_invoke (unsigned target, unsigned protected_data, Capability::Context *c):
Memory *mem = (Memory *)protected_data
switch c->data[0]:
case CAP_MEMORY_CREATE:
unsigned rights = c->data[1] & REQUEST_MASK
unsigned type = c->data[1] & CAPTYPE_MASK
switch type:
case CAPTYPE_RECEIVER:
Receiver *ret = mem->alloc_receiver ()
if ret:
reply_cap (CAPTYPE_RECEIVER | (rights & CAP_RECEIVER_ALL_RIGHTS), (unsigned)ret)
else:
reply_num (0)
break
case CAPTYPE_MEMORY:
Memory *ret = mem->alloc_memory ()
if ret:
reply_cap (CAPTYPE_MEMORY | (rights & CAP_MEMORY_ALL_RIGHTS), (unsigned)ret)
else:
reply_num (0)
break
case CAPTYPE_THREAD:
Thread *ret = mem->alloc_thread ()
if ret:
reply_cap (CAPTYPE_THREAD | (rights & CAP_THREAD_ALL_RIGHTS), (unsigned)ret)
else:
reply_num (0)
break
case CAPTYPE_PAGE:
Page *ret = mem->alloc_page ()
if ret:
reply_cap (CAPTYPE_PAGE | (rights & CAP_PAGE_ALL_RIGHTS), (unsigned)ret)
else:
reply_num (0)
break
case CAPTYPE_CAPPAGE:
Cappage *ret = mem->alloc_cappage ()
if ret:
reply_cap (CAPTYPE_CAPPAGE | (rights & CAP_CAPPAGE_ALL_RIGHTS), (unsigned)ret)
else:
reply_num (0)
break
default:
return
break
case CAP_MEMORY_DESTROY:
if !c->cap[0] || c->cap[0]->address_space != mem || (unsigned)c->cap[0]->target & ~KERNEL_MASK:
return
switch (unsigned)c->cap[0]->target & CAPTYPE_MASK:
case CAPTYPE_RECEIVER:
mem->free_receiver ((Receiver *)c->cap[0]->protected_data)
return
case CAPTYPE_MEMORY:
mem->free_memory ((Memory *)c->cap[0]->protected_data)
return
case CAPTYPE_THREAD:
mem->free_thread ((Thread *)c->cap[0]->protected_data)
return
case CAPTYPE_PAGE:
mem->free_page ((Page *)c->cap[0]->protected_data)
return
case CAPTYPE_CAPABILITY:
mem->free_capability ((Capability *)c->cap[0]->protected_data)
return
case CAPTYPE_CAPPAGE:
mem->free_cappage ((Cappage *)c->cap[0]->protected_data)
return
default:
panic (0x55228930, "invalid case")
break
case CAP_MEMORY_LIST:
// TODO
break
case CAP_MEMORY_MAP:
// FIXME: this should work for fake pages as well.
if !c->cap[0] || (unsigned)c->cap[0]->target & ~KERNEL_MASK || ((unsigned)c->cap[0]->target & CAPTYPE_MASK) != CAPTYPE_PAGE:
break
Page *page = (Page *)c->cap[0]->protected_data
if page->address_space != mem:
break
bool writable = c->data[1] & (unsigned)c->cap[0]->target & (1 << CAP_PAGE_WRITE)
mem->map (page, c->data[1] & PAGE_MASK, writable)
break
case CAP_MEMORY_MAPPING:
bool write
Page *page = mem->get_mapping (c->data[1], &write)
unsigned t = CAPTYPE_PAGE | REQUEST_MASK
if !write:
t &= ~(1 << CAP_PAGE_WRITE)
reply_cap (t, (unsigned)page)
break
case CAP_MEMORY_SET_LIMIT:
mem->limit = c->data[1]
break
case CAP_MEMORY_GET_LIMIT:
reply_num (mem->limit)
break
case CAP_MEMORY_DROP:
if !c->cap[0] || c->cap[0]->address_space != mem:
break
mem->free_capability (c->cap[0])
break
default:
break
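// Thread operations. For CAP_THREAD_INFO, data[1] selects the register or flag word,
// data[3] is a mask of the bits to change, data[2] supplies their new values, and the
// resulting value is sent back to the caller.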
static void thread_invoke (unsigned target, unsigned protected_data, Capability::Context *c):
Thread *thread = (Thread *)protected_data
switch c->data[0]:
case CAP_THREAD_INFO:
unsigned *value
switch c->data[1]:
case CAP_THREAD_INFO_PC:
value = &thread->pc
break
case CAP_THREAD_INFO_SP:
value = &thread->sp
break
case CAP_THREAD_INFO_FLAGS:
// It is not possible to set the PRIV flag, but it can be reset.
c->data[2] &= ~THREAD_FLAG_PRIV
value = &thread->flags
if c->data[3] & ~THREAD_FLAG_USER:
unsigned v = (*value & c->data[3]) | (c->data[2] & c->data[3])
if (v & THREAD_FLAG_WAITING) != (*value & THREAD_FLAG_WAITING):
if v & THREAD_FLAG_WAITING:
thread->wait ()
else:
thread->unwait ()
if (v & THREAD_FLAG_RUNNING) != (*value & THREAD_FLAG_RUNNING):
if v & THREAD_FLAG_RUNNING:
thread->run ()
else:
thread->unrun ()
break
default:
value = Thread_arch_info (thread, c->data[1])
break
if value:
*value &= ~c->data[3]
*value |= c->data[2] & c->data[3]
reply_num (*value)
else:
reply_num (0)
break
case CAP_THREAD_SCHEDULE:
schedule ()
break
case CAP_THREAD_DEBUG:
for unsigned i = 0; i < 2; ++i:
dbg_led (true, true, true)
dbg_sleep (100)
dbg_led (false, false, false)
dbg_sleep (100)
//dbg_send (c->data[1], 5)
break
case CAP_THREAD_REGISTER_INTERRUPT:
// Threads with access to this call are trusted, so no sanity checking is done.
arch_register_interrupt (c->data[1], c->cap[0] ? (Receiver *)c->cap[0]->protected_data : NULL)
break
case CAP_THREAD_GET_TOP_MEMORY:
// Threads with access to this call are trusted, so no sanity checking is done.
reply_cap (CAPTYPE_MEMORY | (c->data[1] & CAP_MEMORY_ALL_RIGHTS), (unsigned)&top_memory)
break
case CAP_THREAD_MAKE_PRIV:
// Threads with access to this call are trusted, so no sanity checking is done.
if c->data[1] & THREAD_FLAG_PRIV:
((Thread *)c->cap[0]->protected_data)->flags |= THREAD_FLAG_PRIV
reply_cap (CAPTYPE_THREAD | (c->data[1] & CAP_THREAD_ALL_PRIV_RIGHTS), c->cap[0]->protected_data)
break
case CAP_THREAD_ALLOC_PHYSICAL:
// Threads with access to this call are trusted, so no sanity checking is done.
Page *page = (Page *)c->cap[0]->protected_data
page->forget ()
if page->data.flags & PAGE_FLAG_PAYING:
page->data.flags &= ~PAGE_FLAG_PAYING
page->address_space->unuse ()
page->data.frame = c->data[1] & PAGE_MASK
if c->data[1] & 1:
page->data.flags |= PAGE_FLAG_FRAME | PAGE_FLAG_PHYSICAL
else:
page->data.flags |= PAGE_FLAG_FRAME | PAGE_FLAG_PHYSICAL | PAGE_FLAG_UNCACHED
break
default:
break
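// Check whether any Page in the share chain still pays for the frame. If none does,
// free the frame and unlink every sharer, clearing their frame pointers and share links.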
static bool page_check_payment (Page *page):
Page *p
for p = (Page *)page->data.share_prev; p; p = (Page *)p->data.share_prev:
if p->data.flags & PAGE_FLAG_PAYING:
return true
for p = (Page *)page->data.share_next; p; p = (Page *)p->data.share_next:
if p->data.flags & PAGE_FLAG_PAYING:
return true
// No Page is paying for this frame anymore.
raw_pfree (page->data.frame)
Page *next
for p = (Page *)page->data.share_prev, next = p ? (Page *)p->data.share_prev : NULL; p; p = next, next = p ? (Page *)p->data.share_prev : NULL:
p->data.frame = NULL
p->data.share_prev = NULL
p->data.share_next = NULL
p->data.flags &= ~(PAGE_FLAG_SHARED | PAGE_FLAG_FRAME)
Page_arch_update_mapping (p)
for p = page, next = (Page *)p->data.share_next; p; p = next, next = p ? (Page *)p->data.share_next : NULL:
p->data.frame = NULL
p->data.share_prev = NULL
p->data.share_next = NULL
p->data.flags &= ~(PAGE_FLAG_SHARED | PAGE_FLAG_FRAME)
Page_arch_update_mapping (p)
return false
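// Cappage variant of the check above; the capabilities stored in the frame are
// invalidated before the frame itself is freed.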
static bool cappage_check_payment (Cappage *cappage):
Cappage *p
for p = (Cappage *)cappage->data.share_prev; p; p = (Cappage *)p->data.share_prev:
if p->data.flags & PAGE_FLAG_PAYING:
return true
for p = (Cappage *)cappage->data.share_next; p; p = (Cappage *)p->data.share_next:
if p->data.flags & PAGE_FLAG_PAYING:
return true
// No Cappage is paying for this frame anymore.
for unsigned i = 0; i < CAPPAGE_SIZE; ++i:
((Capability *)cappage->data.frame)[i].invalidate ()
raw_pfree (cappage->data.frame)
Cappage *next
for p = (Cappage *)cappage->data.share_prev, next = p ? (Cappage *)p->data.share_prev : NULL; p; p = next, next = p ? (Cappage *)p->data.share_prev : NULL:
p->data.frame = NULL
p->data.share_prev = NULL
p->data.share_next = NULL
p->data.flags &= ~(PAGE_FLAG_SHARED | PAGE_FLAG_FRAME)
for p = cappage, next = (Cappage *)p->data.share_next; p; p = next, next = p ? (Cappage *)p->data.share_next : NULL:
p->data.frame = NULL
p->data.share_prev = NULL
p->data.share_next = NULL
p->data.flags &= ~(PAGE_FLAG_SHARED | PAGE_FLAG_FRAME)
return false
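// Page and Cappage capabilities share this handler. CAP_PAGE_SHARE moves, shares or
// copies the backing frame to the Page/Cappage given in cap[0], controlled by the
// PAGE_SHARE_COPY, PAGE_SHARE_FORGET and PAGE_SHARE_READONLY bits of data[1].
// CAP_PAGE_FLAGS sets the flag bits selected by the mask in data[2] to the values in
// data[1], paying for or releasing the frame as needed.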
static void page_invoke (unsigned target, unsigned protected_data, Capability::Context *c):
Page *page
Cappage *cappage
ShareData *share_data
if (target & CAPTYPE_MASK) == CAPTYPE_PAGE:
page = (Page *)protected_data
cappage = NULL
share_data = &page->data
else:
page = NULL
cappage = (Cappage *)protected_data
share_data = &cappage->data
switch c->data[0]:
case CAP_PAGE_SHARE:
if !c->cap[0] || ((unsigned)c->cap[0]->target & CAPTYPE_MASK) != (target & CAPTYPE_MASK):
// FIXME: This makes it impossible to use a fake Page capability.
break
if page:
Page *t = (Page *)c->cap[0]->protected_data
t->forget ()
if c->data[1] & PAGE_SHARE_READONLY:
t->data.flags &= ~PAGE_FLAG_WRITABLE
if !(page->data.flags & PAGE_FLAG_FRAME):
break
if c->data[1] & PAGE_SHARE_COPY:
if ~t->data.flags & PAGE_FLAG_PAYING:
break
if ~c->data[1] & PAGE_SHARE_FORGET || page->data.flags & PAGE_FLAG_SHARED:
unsigned *d = (unsigned *)page->data.frame
if t == page:
Page *other = page->data.share_next ? (Page *)page->data.share_next : (Page *)page->data.share_prev
if !other:
Page_arch_update_mapping (t)
break
if page->data.share_next:
((Page *)page->data.share_next)->data.share_prev = page->data.share_prev
if page->data.share_prev:
((Page *)page->data.share_prev)->data.share_next = page->data.share_next
page->data.share_next = NULL
page->data.share_prev = NULL
page_check_payment (other)
else:
t->data.flags |= PAGE_FLAG_FRAME
t->data.frame = raw_zalloc ()
for unsigned i = 0; i <= (c->data[1] & ~PAGE_MASK); i += 4:
((unsigned *)t->data.frame)[i >> 2] = d[i >> 2]
else:
if t != page:
t->data.frame = page->data.frame
t->data.flags |= PAGE_FLAG_FRAME
page->data.frame = NULL
page->data.flags &= ~PAGE_FLAG_FRAME
Page_arch_update_mapping (page)
Page_arch_update_mapping (t)
else:
if t == page:
break
if c->data[1] & PAGE_SHARE_FORGET:
if ~page->data.flags & PAGE_FLAG_SHARED:
if t->data.flags & PAGE_FLAG_PAYING:
t->data.frame = page->data.frame
t->data.flags |= PAGE_FLAG_FRAME
page->data.frame = NULL
page->data.flags &= ~PAGE_FLAG_FRAME
Page_arch_update_mapping (page)
else:
t->data.share_prev = page->data.share_prev
t->data.share_next = page->data.share_next
if t->data.share_prev:
((Page *)t->data.share_prev)->data.share_next = t
if t->data.share_next:
((Page *)t->data.share_next)->data.share_prev = t
page->data.share_prev = NULL
page->data.share_next = NULL
page->forget ()
page_check_payment (t)
else:
t->data.share_prev = page->data.share_prev
t->data.share_next = page
page->data.share_prev = t
if t->data.share_prev:
((Page *)t->data.share_prev)->data.share_next = t
Page_arch_update_mapping (t)
else:
Cappage *t = (Cappage *)c->cap[0]->protected_data
t->forget ()
if c->data[1] & PAGE_SHARE_READONLY:
t->data.flags &= ~PAGE_FLAG_WRITABLE
if !(cappage->data.flags & PAGE_FLAG_FRAME):
break
if c->data[1] & PAGE_SHARE_COPY:
if ~t->data.flags & PAGE_FLAG_PAYING:
break
if ~c->data[1] & PAGE_SHARE_FORGET || cappage->data.flags & PAGE_FLAG_SHARED:
unsigned *d = (unsigned *)cappage->data.frame
if t == cappage:
Cappage *other = cappage->data.share_next ? (Cappage *)cappage->data.share_next : (Cappage *)cappage->data.share_prev
if !other:
break
if cappage->data.share_next:
((Cappage *)cappage->data.share_next)->data.share_prev = cappage->data.share_prev
if cappage->data.share_prev:
((Cappage *)cappage->data.share_prev)->data.share_next = cappage->data.share_next
cappage->data.share_next = NULL
cappage->data.share_prev = NULL
cappage_check_payment (other)
else:
t->data.flags |= PAGE_FLAG_FRAME
t->data.frame = raw_zalloc ()
for unsigned i = 0; i < ((c->data[1] & ~PAGE_MASK) + 1) * sizeof (Capability); i += 4:
((unsigned *)t->data.frame)[i >> 2] = d[i >> 2]
else:
if t != cappage:
t->data.frame = cappage->data.frame
t->data.flags |= PAGE_FLAG_FRAME
cappage->data.frame = NULL
cappage->data.flags &= ~PAGE_FLAG_FRAME
else:
if t == cappage:
break
if c->data[1] & PAGE_SHARE_FORGET:
if ~cappage->data.flags & PAGE_FLAG_SHARED:
if t->data.flags & PAGE_FLAG_PAYING:
t->data.frame = cappage->data.frame
t->data.flags |= PAGE_FLAG_FRAME
cappage->data.frame = NULL
cappage->data.flags &= ~PAGE_FLAG_FRAME
else:
t->data.share_prev = cappage->data.share_prev
t->data.share_next = cappage->data.share_next
if t->data.share_prev:
((Cappage *)t->data.share_prev)->data.share_next = t
if t->data.share_next:
((Cappage *)t->data.share_next)->data.share_prev = t
cappage->data.share_prev = NULL
cappage->data.share_next = NULL
cappage->forget ()
cappage_check_payment (t)
else:
t->data.share_prev = cappage->data.share_prev
t->data.share_next = cappage
cappage->data.share_prev = t
if t->data.share_prev:
((Cappage *)t->data.share_prev)->data.share_next = t
break
case CAP_PAGE_FLAGS:
// Always refuse to set reserved flags.
c->data[2] &= ~(PAGE_FLAG_PHYSICAL | PAGE_FLAG_UNCACHED)
// Remember the old flags.
unsigned old = share_data->flags
// Compute the new flags.
unsigned new_flags = (share_data->flags & ~c->data[2]) | (c->data[1] & c->data[2])
// If we stop paying, see if the frame is still paid for. If not, free it.
if ~new_flags & old & PAGE_FLAG_PAYING:
if page:
// Decrease the use counter in any case.
page->address_space->unuse ()
if !page_check_payment (page):
new_flags &= ~PAGE_FLAG_FRAME
else:
// Decrease the use counter in any case.
cappage->address_space->unuse ()
if !cappage_check_payment (cappage):
new_flags &= ~PAGE_FLAG_FRAME
// If we start paying, increase the use counter.
if new_flags & ~old & PAGE_FLAG_PAYING:
if !(page ? page->address_space : cappage->address_space)->use():
// If it doesn't work, refuse to set the flag, and refuse to allocate a frame.
new_flags &= ~(PAGE_FLAG_PAYING | PAGE_FLAG_FRAME)
if old & PAGE_FLAG_FRAME:
new_flags |= PAGE_FLAG_FRAME
// If we want a frame, see if we can get it.
if ~old & new_flags & PAGE_FLAG_FRAME:
if page:
Page *p
for p = page; p; p = (Page *)p->data.share_prev:
if p->data.flags & PAGE_FLAG_PAYING:
break
if !p:
for p = (Page *)page->data.share_next; p; p = (Page *)p->data.share_next:
if p->data.flags & PAGE_FLAG_PAYING:
break
if !p:
new_flags &= ~PAGE_FLAG_FRAME
else:
Cappage *p
for p = cappage; p; p = (Cappage *)p->data.share_prev:
if p->data.flags & PAGE_FLAG_PAYING:
break
if !p:
for p = (Cappage *)cappage->data.share_next; p; p = (Cappage *)p->data.share_next:
if p->data.flags & PAGE_FLAG_PAYING:
break
if !p:
new_flags &= ~PAGE_FLAG_FRAME
// If we can get the new frame, get it.
Capability *cap = &((Capability *)cappage->data.frame)[c->data[1]]
cap->invalidate ()
// clone_capability needs a Memory, but doesn't use it when storage is provided.
top_memory.clone_capability (c->cap[0], c->copy[0], cap)
break
default:
break
static void capability_invoke (unsigned target, unsigned protected_data, Capability::Context *c):
Capability *capability = (Capability *)protected_data
switch c->data[0]:
case CAP_CAPABILITY_GET:
reply_cap (capability, true)
break
default:
break
static bool kernel_invoke (unsigned target, unsigned protected_data, Capability::Context *c, Capability *self):
// Kernel calling convention:
// data[0] is the request.
// cap[0] is the reply capability
// other parameters' meanings depend on the operation.
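// For example (hypothetical caller-side sketch, not part of this file): a request such
// as CAP_MEMORY_GET_LIMIT would be issued with a Context along the lines of
//	Capability::Context ctx
//	ctx.data[0] = CAP_MEMORY_GET_LIMIT	// the request
//	ctx.cap[0] = reply_cap_of_caller	// where the answer is delivered
//	memory_capability->invoke (&ctx)
// after which memory_invoke () answers through reply_num (mem->limit).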
if !((1 << c->data[0]) & target & ~REQUEST_MASK):
// You are not allowed to perform this operation.
return true
if (target & (CAPTYPE_MASK | (1 << CAP_RECEIVER_CALL))) == (CAPTYPE_RECEIVER | (1 << CAP_RECEIVER_CALL)):
// This is a call capability.
reply_receiver = (Receiver *)protected_data
reply_receiver->protected_only = !(target & (1 << CAP_RECEIVER_CALL_ASYNC))
Capability *c0 = c->cap[0]
if ~(unsigned)c0->target & ~KERNEL_MASK:
Capability r
fill_cap (&r, protected_data, reply_receiver->reply_protected_data)
c->cap[0] = &r
c->copy[0] = true
bool ret = kernel_invoke ((unsigned)c0->target, c0->protected_data, c, c0)
r.invalidate ()
return ret
else:
// Kernel call: don't create actual capabilities.
reply = NULL
return kernel_invoke ((unsigned)c0->target, c0->protected_data, c, c0)
if (target & (CAPTYPE_MASK | (1 << CAP_RECEIVER_REPLY))) == (CAPTYPE_RECEIVER | (1 << CAP_RECEIVER_REPLY)):
// This is a reply capability.
Receiver *r = (Receiver *)protected_data
r->send_message (r->reply_protected_data, c)
while self->parent:
self = self->parent
while self->sibling_prev:
self->sibling_prev->invalidate ()
while self->sibling_next:
self->sibling_next->invalidate ()
self->invalidate ()
return true
reply = c->cap[0]
if c->data[0] == CAP_DEGRADE:
reply_cap (target & c->data[1], protected_data)
return true
switch target & CAPTYPE_MASK:
case CAPTYPE_RECEIVER:
receiver_invoke (target, protected_data, c)
break
case CAPTYPE_MEMORY:
memory_invoke (target, protected_data, c)
break
case CAPTYPE_THREAD:
thread_invoke (target, protected_data, c)
break
case CAPTYPE_PAGE:
page_invoke (target, protected_data, c)
break
case CAPTYPE_CAPABILITY:
capability_invoke (target, protected_data, c)
break
case CAPTYPE_CAPPAGE:
page_invoke (target, protected_data, c)
break
default:
panic (0x99337744, "invalid capability type invoked")
return true
bool Capability::invoke (Capability::Context *c):
if (unsigned)target & ~KERNEL_MASK:
// This is not a kernel capability: send a message to the receiver.
return target->send_message (protected_data, c)
// This is a kernel capability. Use a function to allow optimized call capabilities.
return kernel_invoke ((unsigned)target, protected_data, c, this)