Mirror of git://projects.qi-hardware.com/iris.git

compiling again with new capability scheme

Bas Wijnen
2009-08-17 23:11:15 +02:00
parent 7b99ba0bdf
commit a892e9cfc0
21 changed files with 1616 additions and 2053 deletions

View File

@@ -19,7 +19,7 @@
#define ARCH
#include "../kernel.hh"
void Thread_arch_init (Thread *thread):
void kThread_arch_init (kThread *thread):
thread->arch.at = 0
thread->arch.v0 = 0
thread->arch.v1 = 0
@@ -45,14 +45,17 @@ void Thread_arch_init (Thread *thread):
thread->arch.k0 = 0
thread->arch.k1 = 0
void Thread_arch_receive (Thread *thread, unsigned protected_data, unsigned *data):
thread->arch.t0 = data[0]
thread->arch.t1 = data[1]
thread->arch.t2 = data[2]
thread->arch.t3 = data[3]
thread->arch.v0 = protected_data
void kThread_arch_receive (kThread *thread, Num cap_protected, Num recv_protected, Num *data):
thread->arch.a0 = data[0].l
thread->arch.a1 = data[0].h
thread->arch.a2 = data[1].l
thread->arch.a3 = data[1].h
thread->arch.s0 = cap_protected.l
thread->arch.s1 = cap_protected.h
thread->arch.s2 = recv_protected.l
thread->arch.s3 = recv_protected.h
unsigned *Thread_arch_info (Thread *thread, unsigned num):
unsigned *kThread_arch_info (kThread *thread, unsigned num):
switch num:
case 1:
return &thread->arch.at
@@ -119,29 +122,29 @@ unsigned *Thread_arch_info (Thread *thread, unsigned num):
default:
return NULL
void Memory_arch_init (Memory *mem):
void kMemory_arch_init (kMemory *mem):
mem->arch.asid = 1
mem->arch.directory = NULL
mem->arch.shadow = NULL
void Memory_arch_free (Memory *mem):
void kMemory_arch_free (kMemory *mem):
while mem->arch.first_page_table:
mem->unmap (mem->arch.first_page_table->first_page->page, mem->arch.first_page_table->first_page->mapping)
if (Memory *)asids[mem->arch.asid] == mem:
if (kMemory *)asids[mem->arch.asid] == mem:
flush_tlb (mem->arch.asid)
asids[mem->arch.asid] = asids[0]
asids[0] = mem->arch.asid
mem->unuse ()
mem->zfree ((unsigned)mem->arch.directory)
static arch_page_table *alloc_page_table (Memory *mem):
static arch_page_table *alloc_page_table (kMemory *mem):
arch_page_table *ret = (arch_page_table *)mem->search_free (sizeof (arch_page_table), (void **)&mem->arch.first_page_table)
if !ret:
return NULL
ret->first_page = NULL
return ret
static arch_page *alloc_page (Memory *mem, arch_page_table *t):
static arch_page *alloc_page (kMemory *mem, arch_page_table *t):
arch_page *ret = (arch_page *)mem->search_free (sizeof (arch_page), (void **)&t->first_page)
if !ret:
return NULL
@@ -152,7 +155,7 @@ static arch_page *alloc_page (Memory *mem, arch_page_table *t):
return ret
static void free_page_table (arch_page_table *t, unsigned idx):
Memory *mem = t->address_space
kMemory *mem = t->address_space
mem->zfree ((unsigned)mem->arch.directory[idx])
mem->arch.directory[idx] = NULL
mem->arch.shadow[idx] = NULL
@@ -188,19 +191,19 @@ static void free_page (arch_page_table *t, arch_page *p):
if !t->first_page:
free_page_table (t, idx)
static unsigned make_entry_lo (Page *page, bool write):
static unsigned make_entry_lo (kPage *page, bool readonly):
if !page->frame:
return 0
unsigned flags
if page->flags & PAGE_FLAG_UNCACHED:
if page->flags & Page::UNCACHED:
flags = 0x10 | 0x2
else:
flags = 0x18 | 0x2
if write:
if !readonly:
flags |= 0x4
return ((page->frame & ~0x80000000) >> 6) | flags
bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write):
bool kMemory_arch_map (kMemory *mem, kPage *page, unsigned address, bool readonly):
if address >= 0x80000000:
return false
address &= PAGE_MASK
@@ -243,10 +246,10 @@ bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write):
return false
unsigned idx = (address >> 12) & ((1 << 9) - 1)
if table[idx]:
mem->unmap ((Page *)table[idx + 0x200], address)
table[idx] = make_entry_lo (page, write)
mem->unmap ((kPage *)table[idx + 0x200], address)
table[idx] = make_entry_lo (page, readonly)
table[idx + 0x200] = (unsigned)p
p->mapping = address + write
p->mapping = address + readonly
p->page = page
p->next_mapped = page->arch.first_mapped
if p->next_mapped:
@@ -254,7 +257,7 @@ bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write):
page->arch.first_mapped = p
return true
void Memory_arch_unmap (Memory *mem, Page *page, unsigned address):
void kMemory_arch_unmap (kMemory *mem, kPage *page, unsigned address):
unsigned didx = address >> 21
unsigned tidx = (address >> 12) & ((1 << 9) - 1)
unsigned *table = mem->arch.directory[didx]
@@ -264,34 +267,34 @@ void Memory_arch_unmap (Memory *mem, Page *page, unsigned address):
table[tidx + 0x200] = 0
free_page (t, p)
Page *Memory_arch_get_mapping (Memory *mem, unsigned address, bool *writable):
kPage *kMemory_arch_get_mapping (kMemory *mem, unsigned address, bool *readonly):
if address >= 0x80000000:
return NULL
unsigned *table = mem->arch.directory[address >> 21]
unsigned idx = (address >> 12) & ((1 << 9) - 1)
arch_page *page = (arch_page *)table[idx + 0x200]
if writable:
*writable = table[idx] & 4
if readonly:
*readonly = !(table[idx] & 4)
return page->page
void Page_arch_update_mapping (Page *page):
void kPage_arch_update_mapping (kPage *page):
if !page->arch.first_mapped:
return
Memory *as = page->address_space
unsigned target = make_entry_lo (page, page->flags & PAGE_FLAG_WRITABLE)
kMemory *as = page->address_space
unsigned target = make_entry_lo (page, page->flags & Page::READONLY)
for arch_page *p = page->arch.first_mapped; p; p = p->next_mapped:
unsigned de = p->mapping >> 21
unsigned te = (p->mapping >> 12) & ((1 << 9) - 1)
bool write = p->mapping & 1
bool readonly = p->mapping & 1
unsigned t
if p->mapping & 1:
t = target
else:
if readonly:
t = target & ~0x4
else:
t = target
as->arch.directory[de][te] = t
tlb_reset (p->mapping & ~1, as->arch.asid, t)
void arch_register_interrupt (unsigned num, Receiver *r):
void arch_register_interrupt (unsigned num, kReceiver *r):
arch_interrupt_receiver[num] = r
// And enable or disable the interrupt.
if r:
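
The numeric flags used by make_entry_lo above follow the standard MIPS32 EntryLo register layout. The sketch below is plain C++ written for this page rather than code from the commit; the helper name is illustrative, and a 4 KB page size is assumed:

// EntryLo layout (MIPS32): | PFN bits 6..29 | C bits 5..3 | D bit 2 | V bit 1 | G bit 0 |
enum {
	ENTRYLO_VALID    = 0x2,     // V: entry may be used for translation
	ENTRYLO_DIRTY    = 0x4,     // D: stores permitted, i.e. the page is writable
	ENTRYLO_UNCACHED = 2 << 3,  // C = 2 (uncached)               -> 0x10
	ENTRYLO_CACHED   = 3 << 3   // C = 3 (cacheable, noncoherent) -> 0x18
};

// Mirrors make_entry_lo: frame is a KSEG0 address, so clearing bit 31 yields the
// physical address, and shifting right by 6 places the page frame number at bit 6.
static unsigned entry_lo_sketch (unsigned frame, bool uncached, bool readonly)
{
	if (!frame)
		return 0;	// no backing frame: leave the entry invalid so accesses fault
	unsigned flags = (uncached ? ENTRYLO_UNCACHED : ENTRYLO_CACHED) | ENTRYLO_VALID;
	if (!readonly)
		flags |= ENTRYLO_DIRTY;
	return ((frame & ~0x80000000u) >> 6) | flags;
}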

View File

@@ -68,7 +68,7 @@
#endif
#ifdef __KERNEL
// register save positions in Thread
// register save positions in kThread
#define SAVE_PC (6 * 4)
#define SAVE_SP (SAVE_PC + 4)
#define SAVE_AT (SAVE_SP + 4)
@@ -108,7 +108,7 @@
void flush_tlb (unsigned asid)
struct Thread_arch:
struct kThread_arch:
unsigned at, v0, v1, a0, a1, a2, a3
unsigned t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
unsigned s0, s1, s2, s3, s4, s5, s6, s7
@@ -122,41 +122,41 @@ struct Thread_arch:
// bits 12-20 are an index in the page table, bits 21-30
// are an index in the page directory and bit 31 is always 0.
struct arch_page : public Object :
Page *page
struct arch_page : public kObject :
kPage *page
unsigned mapping
arch_page *prev_mapped, *next_mapped
struct arch_page_table : public Object :
struct arch_page_table : public kObject :
arch_page *first_page
struct Page_arch:
struct kPage_arch:
arch_page *first_mapped
struct Memory_arch:
struct kMemory_arch:
unsigned asid
unsigned **directory
arch_page_table **shadow
arch_page_table *first_page_table
// Pointers to Memory when asid is taken, index of next free, or 0, if free.
// Pointers to kMemory when asid is taken, index of next free, or 0, if free.
// asid[0] is used as index to first free asid.
EXTERN unsigned asids[64]
EXTERN Receiver *arch_interrupt_receiver[32]
EXTERN kReceiverP arch_interrupt_receiver[32]
// Functions which can be called from assembly must not be mangled.
extern "C":
// Kernel entry points, called from entry.S.
Thread *interrupt ()
Thread *cache_error ()
Thread *exception ()
Thread *tlb_refill ()
kThread *interrupt ()
kThread *cache_error ()
kThread *exception ()
kThread *tlb_refill ()
#ifdef INIT
// Initialize most things (the rest is done in boot.S)
void init (unsigned mem)
// Start running the idle task for the first time.
void run_idle (Thread *self)
void run_idle (kThread *self)
#endif
// These are "extern", not "EXTERN", because they really are defined elsewhere.
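
To make the split described in the comment above arch_page concrete: the shifts match the ones used by kMemory_arch_map and kMemory_arch_unmap in the previous file, and the 0x200 offset seen there reflects that each page-table page holds 512 hardware entries followed by 512 shadow pointers back to the owning arch_page. The helpers below are plain C++ added for this page, not code from the commit:

// User addresses (below 0x80000000) decompose as:
//   bit 31       always 0
//   bits 21..30  index into the page directory (up to 1024 entries)
//   bits 12..20  index into a page table (512 entries)
//   bits 0..11   offset inside the 4 KB page
static inline unsigned directory_index (unsigned address)
{
	return address >> 21;
}

static inline unsigned table_index (unsigned address)
{
	return (address >> 12) & ((1 << 9) - 1);
}
// In a table page, table[idx] is the hardware EntryLo word and table[idx + 0x200]
// is the shadow pointer to the arch_page that owns that mapping.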

View File

@@ -30,7 +30,7 @@ static void init_idle ():
idle.schedule_next = NULL
idle.address_space = &idle_memory
idle.refs.reset ()
idle.flags = THREAD_FLAG_RUNNING | THREAD_FLAG_PRIV
idle.flags = Thread::RUNNING | Thread::PRIV
// initialize idle_memory.
idle_memory.prev = NULL
idle_memory.next = NULL
@@ -49,7 +49,7 @@ static void init_idle ():
idle_page.prev = NULL
idle_page.next = NULL
idle_page.frame = 0x80000000
idle_page.flags = PAGE_FLAG_WRITABLE | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
idle_page.flags = Page::PAYING | Page::FRAME
idle_page.refs.reset ()
idle_page.address_space = NULL
current = &idle
@@ -104,15 +104,15 @@ static void init_cp0 ():
// exceptions in the bootup code will fill EPC and friends.
static void init_threads ():
Thread *previous = NULL
kThread *previous = NULL
first_scheduled = NULL
first_alarm = NULL
Receiver *init_receiver = NULL
kReceiver *init_receiver = NULL
for unsigned i = 0; i < NUM_THREADS; ++i:
Memory *mem = top_memory.alloc_memory ()
kMemory *mem = top_memory.alloc_memory ()
assert (mem)
Thread *thread = mem->alloc_thread (3)
Page **pages = (Page **)mem->zalloc ()
kThread *thread = mem->alloc_thread (3)
kPage **pages = (kPage **)mem->zalloc ()
Elf32_Ehdr *header = (Elf32_Ehdr *)thread_start[i]
for unsigned j = 0; j < SELFMAG; ++j:
if header->e_ident[j] != ELFMAG[j]:
@@ -139,7 +139,7 @@ static void init_threads ():
Elf32_Shdr *shdr = (Elf32_Shdr *)(thread_start[i] + header->e_shoff + section * header->e_shentsize)
if ~shdr->sh_flags & SHF_ALLOC:
continue
bool writable = shdr->sh_flags & SHF_WRITE
bool readonly = !(shdr->sh_flags & SHF_WRITE)
//bool executable = shdr->sh_flags & SHF_EXEC_INSTR
if shdr->sh_type != SHT_NOBITS:
unsigned file_offset = shdr->sh_offset >> PAGE_BITS
@@ -152,18 +152,18 @@ static void init_threads ():
if !pages[idx]:
pages[idx] = mem->alloc_page ()
pages[idx]->frame = thread_start[i] + (idx << PAGE_BITS)
pages[idx]->flags = PAGE_FLAG_WRITABLE | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
pages[idx]->flags = Page::PAYING | Page::FRAME
++top_memory.limit
mem->use ()
if !mem->map (pages[idx], p, writable):
if !mem->map (pages[idx], p, readonly):
panic (0x22446688, "unable to map initial page")
return
else:
if !writable:
if readonly:
panic (0x33399993, "unwritable bss section")
return
for unsigned p = (shdr->sh_addr & PAGE_MASK); p < shdr->sh_addr + shdr->sh_size; p += PAGE_SIZE:
Page *page = mem->get_mapping (p, &writable)
kPage *page = mem->get_mapping (p, &readonly)
if !page:
page = mem->alloc_page ()
if !page:
@@ -173,12 +173,12 @@ static void init_threads ():
if !page->frame:
panic (0x02220022, "out of memory");
return
page->flags = PAGE_FLAG_WRITABLE | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
page->flags = Page::PAYING | Page::FRAME
if !mem->map (page, p, true):
panic (0x33557799, "unable to map initial bss page")
return
else:
if !writable:
if readonly:
panic (0x20203030, "bss section starts on read-only page")
return
for unsigned a = p; a < ((p + PAGE_SIZE) & PAGE_MASK); a += 4:
@@ -192,28 +192,26 @@ static void init_threads ():
continue
++top_memory.limit
top_memory.pfree (thread_start[i] + (p << PAGE_BITS))
Page *stackpage = mem->alloc_page ()
kPage *stackpage = mem->alloc_page ()
stackpage->frame = mem->zalloc ()
stackpage->flags = PAGE_FLAG_WRITABLE | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
stackpage->flags = Page::PAYING | Page::FRAME
if !stackpage || !mem->map (stackpage, 0x7ffff000, true):
panic (0x13151719, "unable to map initial stack page")
return
thread->caps[0] = mem->alloc_caps (16)
for unsigned r = 0; r < 4; ++r:
thread->rcaps[r] = CapRef ()
Receiver *recv = mem->alloc_receiver ()
kReceiver *recv = mem->alloc_receiver ()
recv->owner = thread
thread->receivers = recv
thread->caps[0]->set (__my_receiver, (ReceiverP)(CAPTYPE_RECEIVER | CAP_RECEIVER_ALL_RIGHTS), (Protected)recv, CapRef ())
thread->caps[0]->set (__my_thread, (ReceiverP)(CAPTYPE_THREAD | CAP_THREAD_ALL_PRIV_RIGHTS), (Protected)thread, CapRef ())
thread->caps[0]->set (__my_memory, (ReceiverP)(CAPTYPE_MEMORY | CAP_MEMORY_ALL_RIGHTS), (Protected)mem, CapRef ())
thread->caps[0]->set (__my_call, (ReceiverP)(CAPTYPE_RECEIVER | (1 << CAP_RECEIVER_CALL)), (Protected)recv, CapRef ())
thread->flags = THREAD_FLAG_RUNNING | THREAD_FLAG_PRIV
thread->caps[0]->set (__receiver_num, (kReceiverP)(CAPTYPE_RECEIVER | CAP_MASTER), Num ((unsigned)recv), kCapRef (), &recv->refs)
thread->caps[0]->set (__thread_num, (kReceiverP)(CAPTYPE_THREAD | CAP_MASTER), Num ((unsigned)thread), kCapRef (), &thread->refs)
thread->caps[0]->set (__memory_num, (kReceiverP)(CAPTYPE_MEMORY | CAP_MASTER), Num ((unsigned)mem), kCapRef (), &mem->refs)
thread->caps[0]->set (__call_num, (kReceiverP)(CAPTYPE_RECEIVER | Receiver::CALL), Num ((unsigned)recv), kCapRef (), &recv->refs)
thread->flags = Thread::RUNNING | Thread::PRIV
if !i:
first_scheduled = thread
init_receiver = recv
else:
thread->caps[0]->set (__my_parent, init_receiver, i, CapRef ())
thread->caps[0]->set (__parent_num, init_receiver, i, kCapRef (), &init_receiver->capabilities)
previous->schedule_next = thread
thread->schedule_prev = previous
thread->schedule_next = NULL

View File

@@ -29,7 +29,7 @@ static void handle_exit ():
schedule ()
if !current:
current = &idle
if (current->flags & (THREAD_FLAG_RUNNING | THREAD_FLAG_WAITING)) != THREAD_FLAG_RUNNING:
if (current->flags & (Thread::RUNNING | Thread::WAITING)) != Thread::RUNNING:
panic (current->flags, "non-scheduled thread running")
if !current:
current = &idle
@@ -37,7 +37,7 @@ static void handle_exit ():
return
arch_flush_cache ()
if current != &idle:
if (Memory *)asids[current->address_space->arch.asid] != current->address_space:
if (kMemory *)asids[current->address_space->arch.asid] != current->address_space:
if asids[0]:
current->address_space->arch.asid = asids[0]
asids[0] = asids[asids[0]]
@@ -52,7 +52,7 @@ static void handle_exit ():
asids[current->address_space->arch.asid] = (unsigned)current->address_space
cp0_set (CP0_ENTRY_HI, current->address_space->arch.asid)
directory = current->address_space->arch.directory
if current->flags & THREAD_FLAG_PRIV:
if current->flags & Thread::PRIV:
cp0_set (CP0_STATUS, 0x1000ff13)
else:
cp0_set (CP0_STATUS, 0x0000ff13)
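
The asids[] array manipulated here is the free list declared in the header above: slot 0 heads a chain of free ASID indexes, and a taken slot stores the owning kMemory instead. A minimal plain-C++ sketch of that scheme, written for this page and not part of the commit (the owner is passed as a plain unsigned, matching the kernel's 32-bit casts):

// asids[0] is the head of the free chain; asids[n] is either the next free index
// (while n is free) or the address of the kMemory that currently owns ASID n.
static unsigned asids_sketch[64];

static unsigned alloc_asid (unsigned owner)
{
	unsigned asid = asids_sketch[0];
	if (!asid)
		return 0;	// no free ASID; the kernel handles this case separately (not shown in the hunk)
	asids_sketch[0] = asids_sketch[asid];	// pop from the free chain
	asids_sketch[asid] = owner;		// mark the slot as taken
	return asid;
}

static void free_asid (unsigned asid)
{
	// Mirrors kMemory_arch_free: push the ASID back onto the free chain.
	asids_sketch[asid] = asids_sketch[0];
	asids_sketch[0] = asid;
}
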
@@ -60,7 +60,7 @@ static void handle_exit ():
/// A TLB miss has occurred. This is the slow version. It is only used
/// when k0 or k1 is not 0, or when an error occurs.
/// Otherwise, the ultra-fast code in entry.S is used.
Thread *tlb_refill ():
kThread *tlb_refill ():
old_current = current
if !directory:
unsigned addr
@@ -85,7 +85,7 @@ Thread *tlb_refill ():
return current
/// An interrupt which is not an exception has occurred.
Thread *interrupt ():
kThread *interrupt ():
old_current = current
unsigned ipr = INTC_IPR
for unsigned i = 0; i < 32; ++i:
@@ -98,11 +98,10 @@ Thread *interrupt ():
intc_ack_irq (i)
// Send message to interrupt handler.
if arch_interrupt_receiver[i]:
Capability::Context c
for unsigned j = 0; j < 4; ++j:
kCapability::Context c
for unsigned j = 0; j < 2; ++j:
c.data[j] = 0
c.cap[j].reset ()
c.copy[j] = false
c.caps = NULL
arch_interrupt_receiver[i]->send_message (i, &c)
arch_interrupt_receiver[i] = NULL
if ipr & (1 << IRQ_OST0):
@@ -124,42 +123,49 @@ void flush_tlb (unsigned asid):
__asm__ volatile ("tlbwi")
static void arch_invoke ():
CapRef target
kCapRef target
bool wait
target = old_current->find_capability (old_current->arch.v0, &wait)
do_schedule = false
kCapability::Context msg
unsigned num = old_current->arch.s2
unsigned first = old_current->arch.s3
if num:
if num > 10:
num = 10
bool copy
if old_current->arch.s0 < old_current->slots:
msg.caps = old_current->caps[old_current->arch.s0]
if msg.caps && first < msg.caps->size:
for unsigned i = first; i < num && i < msg.caps->size; ++i:
msg.caps->cap (i)->invalidate ()
kCapRef t = old_current->find_capability ((&old_current->arch.t0)[i], &copy)
if t:
msg.caps->clone (i, t, copy)
else:
msg.caps = NULL
else:
msg.caps = NULL
if wait:
bool dummy
CapRef c0, c1, c2, c3
c0 = old_current->find_capability (old_current->arch.t4, &dummy)
c1 = old_current->find_capability (old_current->arch.t5, &dummy)
c2 = old_current->find_capability (old_current->arch.t6, &dummy)
c3 = old_current->find_capability (old_current->arch.t7, &dummy)
old_current->wait (c0, c1, c2, c3)
old_current->recv_slot = old_current->arch.s1
old_current->wait ()
if !target:
if (old_current->arch.v0 & ~CAPABILITY_COPY) != ~CAPABILITY_COPY:
if (old_current->arch.v0 & ~CAP_COPY) != ~CAP_COPY:
panic (old_current->arch.v0, "debug")
// There must be no action here.
return
Capability::Context c
c.cap[0] = old_current->find_capability (old_current->arch.a0, &c.copy[0])
c.cap[1] = old_current->find_capability (old_current->arch.a1, &c.copy[1])
c.cap[2] = old_current->find_capability (old_current->arch.a2, &c.copy[2])
c.cap[3] = old_current->find_capability (old_current->arch.a3, &c.copy[3])
c.data[0] = old_current->arch.t0
c.data[1] = old_current->arch.t1
c.data[2] = old_current->arch.t2
c.data[3] = old_current->arch.t3
target->invoke (&c)
msg.data[0] = Num (old_current->arch.a0, old_current->arch.a1)
msg.data[1] = Num (old_current->arch.a2, old_current->arch.a3)
target->invoke (&msg)
if do_schedule && !wait:
// If the call was to schedule without wait, it isn't done yet.
schedule ()
else if old_current != current && (old_current->flags & (THREAD_FLAG_RUNNING | THREAD_FLAG_WAITING)) == THREAD_FLAG_RUNNING:
else if old_current != current && (old_current->flags & (Thread::RUNNING | Thread::WAITING)) == Thread::RUNNING:
// If the caller received an immediate reply from the kernel, it is no longer set as current. Don't let it lose its timeslice.
current = old_current
/// A general exception has occurred.
Thread *exception ():
kThread *exception ():
old_current = current
unsigned cause
cp0_get (CP0_CAUSE, cause)
@@ -288,7 +294,7 @@ Thread *exception ():
return current
/// There's a cache error. Big trouble. Probably not worth trying to recover.
Thread *cache_error ():
kThread *cache_error ():
panic (0x33333333, "cache error")
old_current = current
handle_exit ()
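
The register convention of the new invocation scheme can be pieced together from arch_invoke above and kThread_arch_receive in the first file. The summary below is a reconstruction made for this page, not text from the commit, so the register meanings should be read as assumptions:

// Sender side, as read by arch_invoke (MIPS o32 register names):
//   v0      index of the capability to invoke; find_capability also derives the
//           wait flag from it, and a CAP_COPY bit is masked off when testing for
//           the "no capability" value
//   a0..a3  payload: data[0] = Num (a0, a1), data[1] = Num (a2, a3)
//   s0      which capability page (caps[s0]) supplies the slots sent with the message
//   s1      receive slot, stored in recv_slot before the thread waits
//   s2      number of capability slots to transfer (clamped to 10)
//   s3      first slot to transfer; the capability indexes are taken from t0, t1, ...
// Receiver side, as written by kThread_arch_receive:
//   a0/a1   data[0].l / data[0].h
//   a2/a3   data[1].l / data[1].h
//   s0/s1   low / high word of the invoked capability's protected Num
//   s2/s3   low / high word of the receiver's protected Num
struct invoke_registers_sketch
{
	unsigned v0, a0, a1, a2, a3;
	unsigned s0, s1, s2, s3;
	unsigned t0, t1, t2, t3, t4, t5, t6, t7, t8, t9;
};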

View File

@@ -136,13 +136,13 @@
#define LCD_FRAMEBUFFER_BASE ((unsigned short *)0x00021000)
// Map IO memory (requires a privileged __my_thread capability).
#include <iris.h>
#include <iris.hh>
static void __map_io (unsigned physical, unsigned mapping):
memory_create_page (6, __my_memory)
// 0 means not cachable; 0 means don't free when done.
alloc_physical (6, physical, 0, 0)
// 1 means writable.
memory_map (__my_memory, 6, mapping, 1)
Page p = __my_memory.create_page ()
// false means not cachable; false means don't free when done.
p.alloc_physical (physical, 0, 0)
// true means writable.
__my_memory.map (p, mapping, true)
#define map_harb() do { __map_io (HARB_PHYSICAL, HARB_BASE); } while (0)
#define map_emc() do { __map_io (EMC_PHYSICAL, EMC_BASE); } while (0)
@@ -2323,9 +2323,9 @@ static __inline__ void udelay (unsigned us):
GPIO_GPDR (0) = GPIO_GPDR (0)
#ifndef __KERNEL
static __inline__ void ddelay (unsigned ds):
Message m
my_sleep (ds, &m)
static __inline__ void cdelay (unsigned ds):
__my_receiver.set_alarm (ds * (HZ / 100))
Cap ().call (~0)
#endif
/***************************************************************************
@@ -3034,9 +3034,7 @@ static __inline__ void i2c_open ():
// Note that this kills messages from the queue.
static __inline__ void i2c_close ():
Message msg
receiver_set_alarm (__my_receiver, 3 * HZ / 10)
call_00 (0)
cdelay (30)
i2c_disable ()
static __inline__ bool i2c_send (unsigned data):
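
One arithmetic detail from the last file: cdelay counts centiseconds, so cdelay (30) reproduces the 3 * HZ / 10 tick alarm it replaces in i2c_close. A plain-C++ restatement, illustrative only and assuming HZ is a multiple of 100:

// ds centiseconds -> timer ticks at hz ticks per second.
static inline unsigned cdelay_ticks (unsigned hz, unsigned ds)
{
	return ds * (hz / 100);	// ds = 30: 30 * (HZ / 100) == 3 * HZ / 10
}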