1
0
mirror of git://projects.qi-hardware.com/iris.git synced 2024-11-05 04:53:08 +02:00
iris/invoke.ccp
Bas Wijnen ef1b9bfe10 more
2009-06-01 01:12:54 +02:00

695 lines
24 KiB
C++ (pypp)

#pypp 0
#include "kernel.hh"
Capability *Memory::find_capability (unsigned code, bool *copy):
	// Decode a user-supplied capability code into a Capability owned by this
	// Memory.  Bit 1 of the code is the "copy" flag; bit 0 selects the
	// cappage encoding.  Returns NULL if the code does not name a capability
	// that belongs to this Memory.
	*copy = code & 2 ? true : false
	if code & 1:
		// Cappage capability
		// The in-page offset bits encode the slot number; the page-aligned
		// part is the cappage frame address.
		// NOTE(review): after '>> 1' the copy bit (bit 1) ends up in num's
		// low bit -- confirm against the userspace capability encoding.
		unsigned num = (code & ~PAGE_MASK) >> 1
		if num >= CAPPAGE_SIZE:
			return NULL
		Capability *page = (Capability *)(code & PAGE_MASK)
		// Accept the address only if it is really one of our cappage frames.
		for Cappage *p = cappages; p; p = p->next:
			if p->data.frame == (unsigned)page:
				return &page[num]
	else:
		// Normal capability
		// The code is the Capability's address; accept it only if it is in
		// this Memory's capability list.
		for Capability *c = capabilities; c; c = c->next:
			if c == (Capability *)code:
				return c
	return NULL
bool Receiver::try_deliver ():
	// Try to deliver the first deliverable queued message to the owner
	// Thread.  Returns true if a message was handed over; false if there is
	// no message, no ready owner, or capability cloning failed.
	if !messages || !owner || !owner->is_waiting ():
		return false
	Message *m = messages
	if protected_only:
		// Only messages carrying the reply protected data may be delivered.
		for ; m; m = m->next:
			if m->protected_data == reply_protected_data:
				break
		if !m:
			return false
	// NOTE(review): m is not removed from the queue here; verify that the
	// caller (or unwait) dequeues and frees it, or it will be delivered again.
	Capability *c[4]
	for unsigned i = 0; i < 4; ++i:
		if !m->capabilities[i]:
			c[i] = NULL
		else:
			c[i] = owner->address_space->clone_capability (m->capabilities[i], true)
			if !c[i]:
				// Cloning failed: release the clones made so far.
				// (Bug fix: this used to free c[i] -- the failed, NULL
				// clone -- i times, leaking every earlier clone.)
				for unsigned j = 0; j < i; ++j:
					owner->address_space->free_capability (c[j])
				return false
	Thread_arch_receive (owner, m->data, c)
	owner->unwait ()
	return true
bool Receiver::send_message (unsigned protected_data, unsigned data[4], Capability *cap[4], bool copy[4]):
	// Deliver a message to this Receiver: directly into the owner Thread if
	// it is waiting (and the protected data is acceptable), otherwise by
	// queueing it.  Returns false only if the message could not be queued.
	bool tried_direct = false
	if owner && owner->is_waiting () && (protected_data == reply_protected_data || !protected_only):
		Capability *c[4]
		for unsigned i = 0; i < 4; ++i:
			if !cap[i]:
				c[i] = NULL
			else:
				c[i] = owner->address_space->clone_capability (cap[i], copy[i])
				if !c[i]:
					// Cloning failed: roll back the clones made so far and
					// fall through to queueing the message instead.
					// (Bug fix: this used to free c[i] -- the failed, NULL
					// clone -- i times, leaking every earlier clone.)
					for unsigned j = 0; j < i; ++j:
						owner->address_space->free_capability (c[j])
					tried_direct = true
					break
		if !tried_direct:
			Thread_arch_receive (owner, data, c)
			owner->unwait ()
			return true
	// The owner was not waiting, or it was not possible to deliver the message. Put it in the queue.
	Message *msg = address_space->alloc_message (this, protected_data)
	if !msg:
		return false
	for unsigned i = 0; i < 4; ++i:
		msg->data[i] = data[i]
		if !cap[i]:
			msg->capabilities[i] = NULL
		else:
			msg->capabilities[i] = address_space->clone_capability (cap[i], copy[i])
			if !msg->capabilities[i]:
				// Cloning into the queue failed: undo everything.
				for unsigned j = 0; j < i; ++j:
					address_space->free_capability (msg->capabilities[j])
				address_space->free_message (msg)
				return false
	if tried_direct:
		// Direct delivery was attempted but failed; notify the owner.
		Thread_arch_receive_fail (owner)
		owner->unwait ()
	return true
// Reply routing for the kernel operation currently being handled: replies go
// through the reply capability if set, otherwise directly to reply_receiver
// (used for kernel calls which bypass capability creation).
static Capability *reply
static Receiver *reply_receiver
static void fill_cap (Capability *r, unsigned target, unsigned protected_data):
	// Fill caller-provided storage r with a capability for
	// target/protected_data, linking it into the right reference list.
	Capability **ref
	if target & ~KERNEL_MASK:
		// Non-kernel target: target is a Receiver pointer; link into its
		// capability list.
		ref = &((Receiver *)target)->capabilities
	else:
		// Kernel target: protected_data is the kernel object; link into the
		// object's reference list.
		ref = &((Object_base *)protected_data)->refs
	// alloc_capability needs a Memory, but it isn't used if return storage is given.
	top_memory.alloc_capability ((Receiver *)target, NULL, ref, protected_data, r)
static void reply_cap (unsigned target, unsigned protected_data):
	// Create a fresh capability for target/protected_data and send it as the
	// reply to the current invocation.
	Capability r
	fill_cap (&r, target, protected_data)
	unsigned d[4] = { 0, 0, 0, 0 }
	Capability *caps[4] = { &r, NULL, NULL, NULL }
	bool cops[4] = { true, false, false, false }
	if reply:
		reply->invoke (d, caps, cops)
	else:
		reply_receiver->send_message (reply_receiver->reply_protected_data, d, caps, cops)
	// r lives on the stack; invalidate it so no reference outlives this call.
	r.invalidate ()
static void reply_cap (Capability *cap, bool copy):
unsigned d[4] = { 0, 0, 0, 0 }
Capability *caps[4] = { cap, NULL, NULL, NULL }
bool cops[4] = { copy, false, false, false }
if reply:
reply->invoke (d, caps, cops)
else:
reply_receiver->send_message (reply_receiver->reply_protected_data, d, caps, cops)
static void reply_num (unsigned num):
	// Reply with a single number and no capabilities.
	unsigned payload[4] = { num, 0, 0, 0 }
	Capability *caplist[4] = { NULL, NULL, NULL, NULL }
	bool copylist[4] = { false, false, false, false }
	if reply:
		reply->invoke (payload, caplist, copylist)
	else:
		reply_receiver->send_message (reply_receiver->reply_protected_data, payload, caplist, copylist)
static void reply_nums (unsigned num1, unsigned num2):
	// Reply with two numbers and no capabilities.
	unsigned payload[4] = { num1, num2, 0, 0 }
	Capability *caplist[4] = { NULL, NULL, NULL, NULL }
	bool copylist[4] = { false, false, false, false }
	if reply:
		reply->invoke (payload, caplist, copylist)
	else:
		reply_receiver->send_message (reply_receiver->reply_protected_data, payload, caplist, copylist)
static void receiver_invoke (unsigned target, unsigned protected_data, Capability *cap, unsigned data[4]):
	// Handle an invocation of a Receiver capability.  data[0] is the
	// operation; cap is the optional argument capability.
	Receiver *receiver = (Receiver *)protected_data
	switch data[0]:
		case CAP_RECEIVER_SET_OWNER:
			// The argument must be a kernel Thread capability.
			// (Bug fix: cap may be NULL -- guard before dereferencing,
			// matching the check done for CAP_MEMORY_DESTROY.)
			if !cap || ((unsigned)cap->target & (CAPTYPE_MASK | ~KERNEL_MASK)) != CAPTYPE_THREAD:
				// FIXME: This makes it impossible to use a fake Thread capability.
				return
			receiver->own ((Thread *)cap->protected_data)
			break
		case CAP_RECEIVER_CREATE_CAPABILITY:
			// Create a capability to this receiver with caller-chosen
			// protected data.
			reply_cap ((unsigned)receiver, data[1])
			break
		case CAP_RECEIVER_CREATE_CALL_CAPABILITY:
			// Create a call capability; data[1] selects the async variant.
			reply_cap (CAPTYPE_RECEIVER | CAP_RECEIVER_CALL | (data[1] ? CAP_RECEIVER_CALL_ASYNC : 0), protected_data)
			break
		case CAP_RECEIVER_GET_REPLY_PROTECTED_DATA:
			reply_nums (receiver->reply_protected_data, receiver->protected_only ? 1 : 0)
			break
		case CAP_RECEIVER_SET_REPLY_PROTECTED_DATA:
			receiver->reply_protected_data = data[1]
			receiver->protected_only = data[2]
			break
		default:
			break
static void memory_invoke (unsigned target, unsigned protected_data, Capability *cap, unsigned request, unsigned data):
	// Handle an invocation of a Memory capability.  request selects the
	// operation; data and cap are its arguments.
	Memory *mem = (Memory *)protected_data
	switch request:
		case CAP_MEMORY_CREATE:
			// data holds the requested object type plus the rights to grant.
			unsigned rights = data & REQUEST_MASK
			data &= CAPTYPE_MASK
			switch data:
				case CAPTYPE_RECEIVER:
					Receiver *ret = mem->alloc_receiver ()
					if ret:
						reply_cap (data | (rights & CAP_RECEIVER_ALL_RIGHTS), (unsigned)ret)
					else:
						// Allocation failed; reply 0 instead of a capability.
						reply_num (0)
					break
				case CAPTYPE_MEMORY:
					Memory *ret = mem->alloc_memory ()
					if ret:
						reply_cap (data | (rights & CAP_MEMORY_ALL_RIGHTS), (unsigned)ret)
					else:
						reply_num (0)
					break
				case CAPTYPE_THREAD:
					Thread *ret = mem->alloc_thread ()
					if ret:
						reply_cap (data | (rights & CAP_THREAD_ALL_RIGHTS), (unsigned)ret)
					else:
						reply_num (0)
					break
				case CAPTYPE_PAGE:
					Page *ret = mem->alloc_page ()
					if ret:
						reply_cap (data | (rights & CAP_PAGE_ALL_RIGHTS), (unsigned)ret)
					else:
						reply_num (0)
					break
				case CAPTYPE_CAPPAGE:
					Cappage *ret = mem->alloc_cappage ()
					if ret:
						reply_cap (data | (rights & CAP_CAPPAGE_ALL_RIGHTS), (unsigned)ret)
					else:
						reply_num (0)
					break
				default:
					// Not a type this Memory can create.
					return
			break
		case CAP_MEMORY_DESTROY:
			// The argument must be a kernel capability owned by this Memory.
			if !cap || cap->address_space != mem || (unsigned)cap->target & ~KERNEL_MASK:
				return
			switch (unsigned)cap->target & CAPTYPE_MASK:
				case CAPTYPE_RECEIVER:
					mem->free_receiver ((Receiver *)cap->protected_data)
					return
				case CAPTYPE_MEMORY:
					mem->free_memory ((Memory *)cap->protected_data)
					return
				case CAPTYPE_THREAD:
					mem->free_thread ((Thread *)cap->protected_data)
					return
				case CAPTYPE_PAGE:
					mem->free_page ((Page *)cap->protected_data)
					return
				case CAPTYPE_CAPABILITY:
					mem->free_capability ((Capability *)cap->protected_data)
					return
				case CAPTYPE_CAPPAGE:
					mem->free_cappage ((Cappage *)cap->protected_data)
					return
				default:
					panic (0x55228930, "invalid case")
			break
		case CAP_MEMORY_LIST:
			// TODO
			break
		case CAP_MEMORY_MAP:
			// FIXME: this should work for fake pages as well.
			// (Bug fix: cap may be NULL -- guard before dereferencing,
			// matching the check done for CAP_MEMORY_DESTROY.)
			if !cap || (unsigned)cap->target & ~KERNEL_MASK || ((unsigned)cap->target & CAPTYPE_MASK) != CAPTYPE_PAGE:
				break
			Page *page = (Page *)cap->protected_data
			if page->address_space != mem:
				break
			// NOTE(review): CAP_PAGE_WRITE is used as a shift count here but
			// as a mask in CAP_MEMORY_MAPPING below -- verify which is right.
			mem->map (page, data & PAGE_MASK, data & (unsigned)cap->target & (1 << CAP_PAGE_WRITE))
			break
		case CAP_MEMORY_MAPPING:
			// Reply with a capability for the page mapped at address data.
			bool write
			Page *page = mem->get_mapping (data, &write)
			unsigned t = CAPTYPE_PAGE | REQUEST_MASK
			if !write:
				t &= ~CAP_PAGE_WRITE
			reply_cap (t, (unsigned)page)
			break
		case CAP_MEMORY_SET_LIMIT:
			mem->limit = data
			break
		case CAP_MEMORY_GET_LIMIT:
			reply_num (mem->limit)
			break
		case CAP_MEMORY_DROP:
			// (Bug fix: cap may be NULL -- guard before dereferencing.)
			if !cap || cap->address_space != mem:
				break
			mem->free_capability (cap)
			break
		default:
			break
static void thread_invoke (unsigned target, unsigned protected_data, Capability *cap, unsigned data[4]):
	// Handle an invocation of a Thread capability.
	Thread *thread = (Thread *)protected_data
	switch data[0]:
		case CAP_THREAD_INFO:
			// Read-modify-write a thread field: data[1] selects the field,
			// data[3] is the write mask, data[2] holds the new bits.
			unsigned *value
			switch data[1]:
				case CAP_THREAD_INFO_PC:
					value = &thread->pc
					break
				case CAP_THREAD_INFO_SP:
					value = &thread->sp
					break
				case CAP_THREAD_INFO_FLAGS:
					// It is not possible to set the PRIV flag, but it can be reset.
					data[2] &= ~THREAD_FLAG_PRIV
					value = &thread->flags
					if data[3] & ~THREAD_FLAG_USER:
						// Kernel-visible flags change: compute the value the
						// masked update below will produce and apply the
						// scheduling side effects now.
						// (Bug fix: was (*value & data[3]) | ..., which can
						// never clear a flag; the real update below uses
						// *value & ~data[3].)
						unsigned v = (*value & ~data[3]) | (data[2] & data[3])
						if (v & THREAD_FLAG_WAITING) != (*value & THREAD_FLAG_WAITING):
							if v & THREAD_FLAG_WAITING:
								thread->wait ()
							else:
								thread->unwait ()
						if (v & THREAD_FLAG_RUNNING) != (*value & THREAD_FLAG_RUNNING):
							if v & THREAD_FLAG_RUNNING:
								thread->run ()
							else:
								thread->unrun ()
					break
				default:
					value = Thread_arch_info (thread, data[1])
					break
			if value:
				// Apply the masked update and reply with the result.
				*value &= ~data[3]
				*value |= data[2] & data[3]
				reply_num (*value)
			else:
				reply_num (0)
			break
		case CAP_THREAD_SCHEDULE:
			schedule ()
			break
		case CAP_THREAD_REGISTER_INTERRUPT:
			// Threads with access to this call are trusted, so no sanity checking is done.
			arch_register_interrupt (data[1], cap ? (Receiver *)cap->protected_data : NULL)
			break
		case CAP_THREAD_GET_TOP_MEMORY:
			// Threads with access to this call are trusted, so no sanity checking is done.
			reply_cap (CAPTYPE_MEMORY | (data[1] & CAP_MEMORY_ALL_RIGHTS), (unsigned)&top_memory)
			break
		case CAP_THREAD_MAKE_PRIV:
			// Threads with access to this call are trusted, so no sanity checking is done.
			if data[1] & THREAD_FLAG_PRIV:
				((Thread *)cap->protected_data)->flags |= THREAD_FLAG_PRIV
			reply_cap (CAPTYPE_THREAD | (data[1] & CAP_THREAD_ALL_PRIV_RIGHTS), cap->protected_data)
			break
		default:
			break
static bool page_check_payment (Page *page):
	// Check whether any Page sharing this frame is still paying for it.
	// If one is, return true.  Otherwise free the frame, detach every sharer
	// in both directions, and return false.
	Page *p
	for p = (Page *)page->data.share_prev; p; p = (Page *)p->data.share_prev:
		if p->data.flags & PAGE_FLAG_PAYING:
			return true
	for p = (Page *)page->data.share_next; p; p = (Page *)p->data.share_next:
		if p->data.flags & PAGE_FLAG_PAYING:
			return true
	// No Page is paying for this frame anymore.
	raw_pfree (page->data.frame)
	// Walk both directions, reading each link before clearing it.
	// (Bug fix: the old comma-expression loops computed p->data.share_*
	// after p had become NULL, dereferencing a NULL pointer at the end of
	// every walk and when the chain was empty.)
	Page *next
	for p = (Page *)page->data.share_prev; p; p = next:
		next = (Page *)p->data.share_prev
		p->data.frame = NULL
		p->data.share_prev = NULL
		p->data.share_next = NULL
		p->data.flags &= ~(PAGE_FLAG_SHARED | PAGE_FLAG_FRAME)
		Page_arch_update_mapping (p)
	for p = page; p; p = next:
		next = (Page *)p->data.share_next
		p->data.frame = NULL
		p->data.share_prev = NULL
		p->data.share_next = NULL
		p->data.flags &= ~(PAGE_FLAG_SHARED | PAGE_FLAG_FRAME)
		Page_arch_update_mapping (p)
	return false
static bool cappage_check_payment (Cappage *cappage):
	// Cappage flavour of page_check_payment: if no sharer pays for the frame
	// anymore, invalidate all capabilities stored in it, free it, and detach
	// every sharer.  Returns whether the frame is still paid for.
	Cappage *p
	for p = (Cappage *)cappage->data.share_prev; p; p = (Cappage *)p->data.share_prev:
		if p->data.flags & PAGE_FLAG_PAYING:
			return true
	for p = (Cappage *)cappage->data.share_next; p; p = (Cappage *)p->data.share_next:
		if p->data.flags & PAGE_FLAG_PAYING:
			return true
	// No Page is paying for this frame anymore.
	for unsigned i = 0; i < CAPPAGE_SIZE; ++i:
		((Capability *)cappage->data.frame)[i].invalidate ()
	raw_pfree (cappage->data.frame)
	// Walk both directions, reading each link before clearing it.
	// (Bug fix: the old comma-expression loops computed p->data.share_*
	// after p had become NULL, dereferencing a NULL pointer at the end of
	// every walk and when the chain was empty.)
	Cappage *next
	for p = (Cappage *)cappage->data.share_prev; p; p = next:
		next = (Cappage *)p->data.share_prev
		p->data.frame = NULL
		p->data.share_prev = NULL
		p->data.share_next = NULL
		p->data.flags &= ~(PAGE_FLAG_SHARED | PAGE_FLAG_FRAME)
	for p = cappage; p; p = next:
		next = (Cappage *)p->data.share_next
		p->data.frame = NULL
		p->data.share_prev = NULL
		p->data.share_next = NULL
		p->data.flags &= ~(PAGE_FLAG_SHARED | PAGE_FLAG_FRAME)
	return false
static void page_invoke (unsigned target, unsigned protected_data, Capability *cap, bool copy, unsigned data[4]):
	// Handle an invocation of a Page or Cappage capability.  Both kinds use
	// the same ShareData bookkeeping, accessed through share_data; exactly
	// one of page/cappage is non-NULL, selecting the type-specific paths.
	Page *page
	Cappage *cappage
	ShareData *share_data
	if (target & CAPTYPE_MASK) == CAPTYPE_PAGE:
		page = (Page *)protected_data
		cappage = NULL
		share_data = &page->data
	else:
		page = NULL
		cappage = (Cappage *)protected_data
		share_data = &cappage->data
	switch data[0]:
		case CAP_PAGE_SHARE:
			// Share, copy or hand over this frame to the page named by cap.
			// NOTE(review): cap is dereferenced without a NULL check here --
			// verify callers always pass an argument capability.
			if ((unsigned)cap->target & CAPTYPE_MASK) != (target & CAPTYPE_MASK):
				// FIXME: This makes it impossible to use a fake Page capability.
				break
			if page:
				Page *t = (Page *)cap->protected_data
				t->forget ()
				if data[1] & PAGE_SHARE_READONLY:
					t->data.flags &= ~PAGE_FLAG_WRITABLE
				// NOTE(review): '!' binds tighter than '&', so this reads as
				// (!flags) & FRAME, which is almost always 0.  Probably meant
				// ~flags & FRAME, the idiom used elsewhere in this file.
				if !page->data.flags & PAGE_FLAG_FRAME:
					break
				if data[1] & PAGE_SHARE_COPY:
					// Copy the frame contents into t.
					if ~t->data.flags & PAGE_FLAG_PAYING:
						break
					if ~data[1] & PAGE_SHARE_FORGET || page->data.flags & PAGE_FLAG_SHARED:
						unsigned *d = (unsigned *)page->data.frame
						if t == page:
							// Copying onto itself: detach from the share chain.
							Page *other = page->data.share_next ? (Page *)page->data.share_next : (Page *)page->data.share_prev
							if !other:
								Page_arch_update_mapping (t)
								break
							if page->data.share_next:
								((Page *)page->data.share_next)->data.share_prev = page->data.share_prev
							if page->data.share_prev:
								((Page *)page->data.share_prev)->data.share_next = page->data.share_next
							page->data.share_next = NULL
							page->data.share_prev = NULL
							page_check_payment (other)
						else:
							t->data.flags |= PAGE_FLAG_FRAME
							t->data.frame = raw_zalloc ()
						// Copy word-wise; data[1]'s offset bits bound the size.
						for unsigned i = 0; i <= (data[1] & ~PAGE_MASK); i += 4:
							((unsigned *)t->data.frame)[i >> 2] = d[i >> 2]
					else:
						// Forget without sharing: simply move the frame over.
						if t != page:
							t->data.frame = page->data.frame
							t->data.flags |= PAGE_FLAG_FRAME
							page->data.frame = NULL
							page->data.flags &= ~PAGE_FLAG_FRAME
							Page_arch_update_mapping (page)
					Page_arch_update_mapping (t)
				else:
					// Share (or hand over) the frame without copying.
					if t == page:
						break
					if data[1] & PAGE_SHARE_FORGET:
						if ~page->data.flags & PAGE_FLAG_SHARED:
							// Not shared: hand the frame to t if t pays for it.
							if t->data.flags & PAGE_FLAG_PAYING:
								t->data.frame = page->data.frame
								t->data.flags |= PAGE_FLAG_FRAME
								page->data.frame = NULL
								page->data.flags &= ~PAGE_FLAG_FRAME
								Page_arch_update_mapping (page)
						else:
							// Shared: t takes page's place in the share chain.
							t->data.share_prev = page->data.share_prev
							t->data.share_next = page->data.share_next
							if t->data.share_prev:
								((Page *)t->data.share_prev)->data.share_next = t
							if t->data.share_next:
								((Page *)t->data.share_next)->data.share_prev = t
							page->data.share_prev = NULL
							page->data.share_next = NULL
							page->forget ()
							page_check_payment (t)
					else:
						// Insert t into the share chain just before page.
						t->data.share_prev = page->data.share_prev
						t->data.share_next = page
						page->data.share_prev = t
						if t->data.share_prev:
							((Page *)t->data.share_prev)->data.share_next = t
					Page_arch_update_mapping (t)
			else:
				// Cappage variant of the same operation.
				Cappage *t = (Cappage *)cap->protected_data
				t->forget ()
				if data[1] & PAGE_SHARE_READONLY:
					t->data.flags &= ~PAGE_FLAG_WRITABLE
				// NOTE(review): same '!' vs '~' precedence concern as above.
				if !cappage->data.flags & PAGE_FLAG_FRAME:
					break
				if data[1] & PAGE_SHARE_COPY:
					if ~t->data.flags & PAGE_FLAG_PAYING:
						break
					if ~data[1] & PAGE_SHARE_FORGET || cappage->data.flags & PAGE_FLAG_SHARED:
						unsigned *d = (unsigned *)cappage->data.frame
						if t == cappage:
							Cappage *other = cappage->data.share_next ? (Cappage *)cappage->data.share_next : (Cappage *)cappage->data.share_prev
							if !other:
								break
							if cappage->data.share_next:
								((Cappage *)cappage->data.share_next)->data.share_prev = cappage->data.share_prev
							if cappage->data.share_prev:
								((Cappage *)cappage->data.share_prev)->data.share_next = cappage->data.share_next
							cappage->data.share_next = NULL
							cappage->data.share_prev = NULL
							cappage_check_payment (other)
						else:
							t->data.flags |= PAGE_FLAG_FRAME
							t->data.frame = raw_zalloc ()
						// Copy the capability slots word-wise into t's frame.
						for unsigned i = 0; i < ((data[1] & ~PAGE_MASK) + 1) * sizeof (Capability); i += 4:
							((unsigned *)t->data.frame)[i >> 2] = d[i >> 2]
					else:
						if t != cappage:
							t->data.frame = cappage->data.frame
							t->data.flags |= PAGE_FLAG_FRAME
							cappage->data.frame = NULL
							cappage->data.flags &= ~PAGE_FLAG_FRAME
				else:
					if t == cappage:
						break
					if data[1] & PAGE_SHARE_FORGET:
						if ~cappage->data.flags & PAGE_FLAG_SHARED:
							if t->data.flags & PAGE_FLAG_PAYING:
								t->data.frame = cappage->data.frame
								t->data.flags |= PAGE_FLAG_FRAME
								cappage->data.frame = NULL
								cappage->data.flags &= ~PAGE_FLAG_FRAME
						else:
							t->data.share_prev = cappage->data.share_prev
							t->data.share_next = cappage->data.share_next
							if t->data.share_prev:
								((Cappage *)t->data.share_prev)->data.share_next = t
							if t->data.share_next:
								((Cappage *)t->data.share_next)->data.share_prev = t
							cappage->data.share_prev = NULL
							cappage->data.share_next = NULL
							cappage->forget ()
							cappage_check_payment (t)
					else:
						t->data.share_prev = cappage->data.share_prev
						t->data.share_next = cappage
						cappage->data.share_prev = t
						if t->data.share_prev:
							((Cappage *)t->data.share_prev)->data.share_next = t
			// NOTE(review): there is no 'break' here, so CAP_PAGE_SHARE falls
			// through into CAP_PAGE_FLAGS with share-encoded data[1]/data[2].
			// Verify whether that fall-through is intentional.
		case CAP_PAGE_FLAGS:
			// Remember the old flags.
			unsigned old = share_data->flags
			// Compute the new flags.
			unsigned new_flags = (share_data->flags & ~data[2]) | (data[1] & data[2])
			// If we stop paying, see if the frame is still paid for. If not, free it.
			if ~new_flags & old & PAGE_FLAG_PAYING:
				if page:
					// Decrease the use counter in any case.
					page->address_space->unuse ()
					if !page_check_payment (page):
						new_flags &= ~PAGE_FLAG_FRAME
				else:
					// Decrease the use counter in any case.
					cappage->address_space->unuse ()
					if !cappage_check_payment (cappage):
						new_flags &= ~PAGE_FLAG_FRAME
			// If we start paying, increase the use counter.
			if new_flags & ~old & PAGE_FLAG_PAYING:
				if !(page ? page->address_space : cappage->address_space)->use():
					// If it doesn't work, refuse to set the flag, and refuse to allocate a frame.
					new_flags &= ~(PAGE_FLAG_PAYING | PAGE_FLAG_FRAME)
					if old & PAGE_FLAG_FRAME:
						new_flags |= PAGE_FLAG_FRAME
			// If we want a frame, see if we can get it.
			if ~old & new_flags & PAGE_FLAG_FRAME:
				if page:
					// Look for a paying sharer in both directions.
					Page *p
					for p = page; p; p = (Page *)p->data.share_prev:
						if p->data.flags & PAGE_FLAG_PAYING:
							break
					if !p:
						for p = (Page *)page->data.share_next; p; p = (Page *)p->data.share_next:
							if p->data.flags & PAGE_FLAG_PAYING:
								break
						if !p:
							new_flags &= ~PAGE_FLAG_FRAME
				else:
					Cappage *p
					for p = cappage; p; p = (Cappage *)p->data.share_prev:
						if p->data.flags & PAGE_FLAG_PAYING:
							break
					if !p:
						for p = (Cappage *)cappage->data.share_next; p; p = (Cappage *)p->data.share_next:
							if p->data.flags & PAGE_FLAG_PAYING:
								break
						if !p:
							new_flags &= ~PAGE_FLAG_FRAME
				// If we can get the new frame, get it.
				if new_flags & PAGE_FLAG_FRAME:
					share_data->frame = raw_zalloc ()
			// If the frame is lost, the page is no longer shared.
			if old & ~new_flags & PAGE_FLAG_FRAME:
				new_flags &= ~PAGE_FLAG_SHARED
				if page:
					if share_data->share_prev:
						((Page *)share_data->share_prev)->data.share_next = share_data->share_next
					if share_data->share_next:
						((Page *)share_data->share_next)->data.share_prev = share_data->share_prev
				else:
					if share_data->share_prev:
						((Cappage *)share_data->share_prev)->data.share_next = share_data->share_next
					if share_data->share_next:
						((Cappage *)share_data->share_next)->data.share_prev = share_data->share_prev
				share_data->share_prev = NULL
				share_data->share_next = NULL
			// Set the shared flag.
			if share_data->share_prev || share_data->share_next:
				new_flags |= PAGE_FLAG_SHARED
				// Don't allow making shared pages writable.
				if ~old & PAGE_FLAG_WRITABLE:
					new_flags &= ~PAGE_FLAG_WRITABLE
			else:
				new_flags &= ~PAGE_FLAG_SHARED
			// Actually set the new flags.
			share_data->flags = new_flags
			// Update mappings if there is a change in writability, or in frame.
			if page && ((share_data->flags ^ old) & PAGE_FLAG_WRITABLE || (share_data->flags ^ old) & PAGE_FLAG_FRAME):
				Page_arch_update_mapping (page)
			reply_num (share_data->flags)
			break
		case CAP_CAPPAGE_SET:
			// Store capability cap into slot data[1] of this cappage; the
			// invoked capability must carry the write right.
			if !cappage || data[1] >= CAPPAGE_SIZE || !(target & CAP_PAGE_WRITE):
				return
			Capability *c = &((Capability *)cappage->data.frame)[data[1]]
			c->invalidate ()
			// clone_capability needs a Memory, but doesn't use it when storage is provided.
			top_memory.clone_capability (cap, copy, c)
			break
		default:
			break
static void capability_invoke (unsigned target, unsigned protected_data, Capability *cap, unsigned request, unsigned data):
	// Handle an invocation of a Capability capability (stored capability).
	Capability *stored = (Capability *)protected_data
	switch request:
		case CAP_CAPABILITY_GET:
			// Hand the caller a copy of the stored capability.
			reply_cap (stored, true)
			break
		default:
			break
static bool kernel_invoke (unsigned target, unsigned protected_data, unsigned d[4], Capability *c[4], bool copy[4], Capability *self):
	// Dispatch an invocation of a kernel capability.  self is the capability
	// being invoked (needed to invalidate used reply capabilities).
	// Kernel calling convention:
	// data[0] is the request.
	// cap[0] is the reply capability
	// other parameters' meanings depend on the operation.
	if !((1 << d[0]) & target & ~REQUEST_MASK):
		// You are not allowed to perform this operation.
		return true
	if (target & (CAPTYPE_MASK | (1 << CAP_RECEIVER_CALL))) == (CAPTYPE_RECEIVER | (1 << CAP_RECEIVER_CALL)):
		// This is a call capability.
		reply_receiver = (Receiver *)protected_data
		reply_receiver->protected_only = !(target & (1 << CAP_RECEIVER_CALL_ASYNC))
		Capability r
		// c[0] holds the capability that is actually being called.
		// NOTE(review): c[0] is dereferenced without a NULL check -- verify.
		Capability *c0 = c[0]
		// NOTE(review): elsewhere "kernel capability" is tested with
		// target & ~KERNEL_MASK (see Capability::invoke); here the test is
		// ~target & ~KERNEL_MASK -- confirm this is the intended predicate.
		if ~(unsigned)c0->target & ~KERNEL_MASK:
			// Build a real reply capability in stack storage r and invoke
			// the called capability with it as cap[0].
			fill_cap (&r, protected_data, reply_receiver->reply_protected_data)
			c[0] = &r
			copy[0] = true
			bool ret = kernel_invoke ((unsigned)c0->target, c0->protected_data, d, c, copy, c0)
			r.invalidate ()
			return ret
		else:
			// Kernel call: don't create actual capabilities.
			reply = NULL
			return kernel_invoke ((unsigned)c0->target, c0->protected_data, d, c, copy, c0)
	if (target & (CAPTYPE_MASK | (1 << CAP_RECEIVER_REPLY))) == (CAPTYPE_RECEIVER | (1 << CAP_RECEIVER_REPLY)):
		// This is a reply capability.
		Receiver *r = (Receiver *)protected_data
		r->send_message (r->reply_protected_data, d, c, copy)
		// After replying, invalidate the whole family of this capability:
		// walk up to the root copy, then invalidate the siblings on both
		// sides and finally the root itself.
		while self->parent:
			self = self->parent
		while self->sibling_prev:
			self->sibling_prev->invalidate ()
		while self->sibling_next:
			self->sibling_next->invalidate ()
		self->invalidate ()
		return true
	// Ordinary kernel capability: record the reply target and dispatch.
	reply = c[0]
	if d[0] == CAP_DEGRADE:
		// Reply with a copy of this capability holding (at most) the rights
		// in d[1].
		reply_cap (target & d[1], protected_data)
		return true
	switch target & CAPTYPE_MASK:
		case CAPTYPE_RECEIVER:
			receiver_invoke (target, protected_data, c[1], d)
			break
		case CAPTYPE_MEMORY:
			memory_invoke (target, protected_data, c[1], d[0], d[1])
			break
		case CAPTYPE_THREAD:
			thread_invoke (target, protected_data, c[1], d)
			break
		case CAPTYPE_PAGE:
			page_invoke (target, protected_data, c[1], copy[1], d)
			break
		case CAPTYPE_CAPABILITY:
			capability_invoke (target, protected_data, c[1], d[0], d[1])
			break
		case CAPTYPE_CAPPAGE:
			page_invoke (target, protected_data, c[1], copy[1], d)
			break
		default:
			panic (0x99337744, "invalid capability type invoked")
	return true
bool Capability::invoke (unsigned data[4], Capability *cap[4], bool copy[4]):
	// Invoke this capability with message words data and capabilities cap
	// (copy[i] selects copy vs reference per capability).
	if (unsigned)target & ~KERNEL_MASK:
		// This is not a kernel capability: send a message to the receiver.
		return target->send_message (protected_data, data, cap, copy)
	// This is a kernel capability. Use a function to allow optimized call capabilities.
	return kernel_invoke ((unsigned)target, protected_data, data, cap, copy, this)