mirror of git://projects.qi-hardware.com/iris.git

things seem to start working again

Bas Wijnen 2010-01-17 10:01:42 +01:00
parent 7dc6ecb0ea
commit 05f79658e1
10 changed files with 219 additions and 306 deletions

View File

@@ -176,6 +176,7 @@ kPage *kMemory::alloc_page ():
return NULL
ret->frame = 0
ret->flags = 0
ret->mapping = ~0
return ret
kThread *kMemory::alloc_thread (unsigned size):
@@ -303,6 +304,8 @@ void kCaps::clone (unsigned index, kCapRef source, bool copy):
set (index, source->target, source->protected_data, source)
void kMemory::free_page (kPage *page):
if page->mapping != ~0:
page->address_space->unmap (page)
if page->flags & Kernel::Page::PAYING:
unuse ()
if page->frame:
@@ -458,7 +461,6 @@ void kMemory::free_memory (kMemory *mem):
free_list (mem->lists)
while mem->listitems:
free_listitem (mem->listitems)
kMemory_arch_free (mem)
if mem->frees:
panic (0, "kernel memory leak: memory still in use")
free_obj (mem, (void **)&memories)
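
A minimal standard-C++ sketch (illustrative, not from the tree) of the sentinel convention this file now uses: kPage::mapping holds the mapped virtual address, and ~0 means "not mapped", as set in alloc_page and tested in free_page above.

static bool is_mapped (unsigned mapping) {
	return mapping != ~0u; // ~0 is the "no mapping" sentinel set in alloc_page
}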

View File

@@ -27,24 +27,24 @@ static unsigned _free
extern unsigned _end
void init_alloc ():
_free = (unsigned)&_end
_free = ((unsigned)&_end + PAGE_SIZE - 1) & PAGE_MASK
char *alloc_space (unsigned pages):
unsigned ret = (_free + PAGE_SIZE - 1) & PAGE_MASK
_free = ret + pages * PAGE_SIZE
_free = ret + (pages << PAGE_BITS)
return (char *)ret
void *operator new[] (unsigned size):
//kdebug ("new ")
void *ret = (void *)_free
size = (size + 3) & ~3
unsigned rest = PAGE_SIZE - (((_free - 1) & ~PAGE_MASK) + 1)
if rest < size:
unsigned pages = ((size - rest) + PAGE_SIZE - 1) >> PAGE_BITS
char *space = alloc_space (pages)
for unsigned p = 0; p < pages; ++p:
Kernel::Page page = Kernel::my_memory.create_page ()
page.set_flags (Kernel::Page::PAYING | Kernel::Page::FRAME, Kernel::Page::PAYING | Kernel::Page::FRAME)
Kernel::my_memory.map (page, (unsigned)&space[p << PAGE_BITS])
Kernel::my_memory.map (page, _free + rest + (p << PAGE_BITS))
Kernel::free_cap (page)
_free += size
//kdebug_num ((unsigned)ret)
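
Both rounding expressions above are the usual align-up idiom; a standalone standard-C++ sketch, assuming PAGE_BITS = 12 and PAGE_MASK = ~(PAGE_SIZE - 1) as elsewhere in this tree:

#include <cassert>

enum { PAGE_BITS = 12, PAGE_SIZE = 1 << PAGE_BITS };
static unsigned const PAGE_MASK = ~(unsigned)(PAGE_SIZE - 1);

static unsigned align_up (unsigned addr) {
	// Adding PAGE_SIZE - 1 carries any partial page into the next one;
	// masking then clears the offset bits.
	return (addr + PAGE_SIZE - 1) & PAGE_MASK;
}

int main () {
	assert (align_up (0x1000) == 0x1000);          // already aligned: unchanged
	assert (align_up (0x1001) == 0x2000);          // rounds up to the next page
	assert ((5u << PAGE_BITS) == 5u * PAGE_SIZE);  // the shift used in alloc_space
	return 0;
}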
@@ -125,9 +125,6 @@ static void list_files (Directory root):
kdebug ("\n")
Kernel::panic (0)
num_files = fullsize.l
kdebug ("files in directory: ")
kdebug_num (num_files)
kdebug ("\n")
files = new file[num_files]
Kernel::Caps caps = Kernel::my_memory.create_caps (num_files)
unsigned slot = Kernel::alloc_slot ()
@@ -184,15 +181,14 @@ static void run (file *f, bool priv):
Kernel::Memory mem = top_memory.create_memory ()
unsigned num_pages = (f->size + PAGE_SIZE - 1) >> PAGE_BITS
for unsigned p = 0; p < num_pages; ++p:
kdebug_num (p)
kdebug ("/")
kdebug_num (num_pages)
kdebug ("\n")
//kdebug_num (p)
//kdebug ("/")
//kdebug_num (num_pages)
//kdebug ("\n")
Kernel::set_recv_arg (Kernel::Cap (slot, p))
Kernel::my_memory.create_page ()
Kernel::Page (slot, p).set_flags (Kernel::Page::PAYING, Kernel::Page::PAYING)
f->string.get_page (p << PAGE_BITS, Kernel::Cap (slot, p))
kdebug_line ()
Kernel::my_memory.map (Kernel::Cap (slot, p), (unsigned)&mapping[p << PAGE_BITS])
Kernel::Thread thread = mem.create_thread (NUM_SLOTS)
if priv:
@@ -229,24 +225,18 @@ static void run (file *f, bool priv):
return
thread.set_pc (header->e_entry)
thread.set_sp (0x80000000)
kdebug_line ()
for unsigned section = 0; section < header->e_shnum; ++section:
kdebug_line ()
Elf32_Shdr *shdr = (Elf32_Shdr *)((unsigned)mapping + header->e_shoff + section * header->e_shentsize)
if ~shdr->sh_flags & SHF_ALLOC:
continue
kdebug_line ()
bool readonly = !(shdr->sh_flags & SHF_WRITE)
//bool executable = shdr->sh_flags & SHF_EXEC_INSTR
kdebug_line ()
if shdr->sh_type != SHT_NOBITS:
kdebug_line ()
unsigned file_offset = shdr->sh_offset >> PAGE_BITS
if (file_offset + ((shdr->sh_size + PAGE_SIZE - 1) >> PAGE_BITS)) >= (PAGE_SIZE >> 2):
kdebug ("thread too large\n")
Kernel::panic (0)
return
kdebug_line ()
for unsigned p = (shdr->sh_addr & PAGE_MASK); p < shdr->sh_addr + shdr->sh_size; p += PAGE_SIZE:
unsigned section_offset = (p - (shdr->sh_addr & PAGE_MASK)) >> PAGE_BITS
unsigned idx = file_offset + section_offset
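
The loop above walks ELF section headers by hand; a self-contained sketch of the same walk with the standard <elf.h> types (image is a stand-in for the mapped file, not a name from this tree):

#include <elf.h>

static void walk_sections (char const *image) {
	Elf32_Ehdr const *eh = (Elf32_Ehdr const *)image;
	for (unsigned s = 0; s < eh->e_shnum; ++s) {
		Elf32_Shdr const *sh = (Elf32_Shdr const *)(image + eh->e_shoff + s * eh->e_shentsize);
		if (!(sh->sh_flags & SHF_ALLOC))
			continue; // only sections that occupy memory at run time
		// sh->sh_addr .. sh->sh_addr + sh->sh_size must be mapped page by
		// page before the thread can run; SHT_NOBITS sections get fresh
		// zeroed pages instead of file contents.
	}
}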
@@ -257,24 +247,28 @@ static void run (file *f, bool priv):
continue
Kernel::free_cap (page)
page = mem.create_page ()
page.set_flags (Kernel::Page::PAYING, Kernel::Page::PAYING)
unsigned f
if readonly:
f = Kernel::Page::PAYING | Kernel::Page::MAPPED_READONLY
else:
f = Kernel::Page::PAYING
page.set_flags (f, f)
Kernel::Page (slot, idx).share (page, 0)
kdebug ("mapping at ")
kdebug_num (p)
kdebug ("\n")
if !mem.map (page, p, readonly):
//kdebug ("mapping at ")
//kdebug_num (p)
//if readonly:
// kdebug (" (readonly)")
//kdebug ("\n")
if !mem.map (page, p):
kdebug ("unable to map page\n")
Kernel::panic (0)
return
Kernel::free_cap (page)
kdebug_line ()
else:
kdebug_line ()
if readonly:
kdebug ("unwritable bss section\n")
Kernel::panic (0)
return
kdebug_line ()
for unsigned p = (shdr->sh_addr & PAGE_MASK); p < shdr->sh_addr + shdr->sh_size; p += PAGE_SIZE:
Kernel::Page page = mem.mapping ((void *)p)
if Kernel::recv.data[0].l == Kernel::NO_ERROR:
@@ -302,103 +296,53 @@ static void run (file *f, bool priv):
Kernel::panic (0)
return
Kernel::free_cap (page)
kdebug_line ()
kdebug_line ()
for unsigned p = 0; p <= num_pages; ++p:
for unsigned p = 0; p < num_pages; ++p:
Kernel::my_memory.destroy (Kernel::Page (slot, p))
kdebug_line ()
Kernel::Page stackpage = mem.create_page ()
kdebug_line ()
stackpage.set_flags (Kernel::Page::PAYING | Kernel::Page::FRAME, Kernel::Page::PAYING | Kernel::Page::FRAME)
kdebug_line ()
if Kernel::recv.data[0].l != Kernel::NO_ERROR || !mem.map (stackpage, 0x7ffff000):
kdebug ("unable to map initial stack page\n")
Kernel::panic (0)
return
kdebug_line ()
Kernel::free_cap (stackpage)
Kernel::Caps caps = mem.create_caps (NUM_CAPS)
kdebug_line ()
thread.use (caps, 0)
thread.set_info (Kernel::Thread::A0, NUM_SLOTS)
thread.set_info (Kernel::Thread::A1, NUM_CAPS)
kdebug_line ()
Kernel::Receiver receiver = mem.create_receiver ()
receiver.set_owner (thread.copy ())
Kernel::Cap call = receiver.create_call_capability ()
Kernel::Cap parent = Kernel::my_receiver.create_capability (++current_thread)
kdebug_line ()
caps.set (__receiver_num, receiver.copy ())
caps.set (__thread_num, thread.copy ())
caps.set (__memory_num, mem.copy ())
caps.set (__call_num, call.copy ())
caps.set (__parent_num, parent.copy ())
kdebug_line ()
thread.run ()
kdebug_line ()
Kernel::free_cap (receiver)
Kernel::free_cap (thread)
Kernel::free_cap (mem)
Kernel::free_cap (call)
Kernel::free_cap (parent)
kdebug_line ()
Kernel::print_caps ()
static void dump_devices ():
kdebug ("String: ")
kdebug_num (String::ID, 3)
kdebug ("\nWString: ")
kdebug_num (WString::ID, 3)
kdebug ("\nDevice: ")
kdebug_num (Device::ID, 3)
kdebug ("\nParent: ")
kdebug_num (Parent::ID, 3)
kdebug ("\nKeyboard: ")
kdebug_num (Keyboard::ID, 3)
kdebug ("\nBuzzer: ")
kdebug_num (Buzzer::ID, 3)
kdebug ("\nDisplay: ")
kdebug_num (Display::ID, 3)
kdebug ("\nSetting: ")
kdebug_num (Setting::ID, 3)
kdebug ("\nDirectory: ")
kdebug_num (Directory::ID, 3)
kdebug ("\nWDirectory: ")
kdebug_num (WDirectory::ID, 3)
kdebug ("\nFilesystem: ")
kdebug_num (Filesystem::ID, 3)
kdebug ("\nStream: ")
kdebug_num (Stream::ID, 3)
kdebug ("\n")
Kernel::free_cap (caps)
Kernel::Num start ():
// Wait for the debugging device to be active, in case there is one.
Kernel::schedule ()
init_alloc ()
dump_devices ()
top_memory = Kernel::get_top_memory ()
Directory root = receive_devices ()
root.lock_ro ()
kdebug_line ()
list_files (root)
kdebug_line ()
sort ()
kdebug_line ()
Kernel::Caps caps = Kernel::my_memory.create_caps (max_pages)
kdebug ("max pages: ")
kdebug_num (max_pages)
kdebug ("\n")
slot = caps.use ()
mapping = alloc_space (max_pages)
for unsigned i = 0; i < num_files; ++i:
kdebug_line ()
run (&files[index[i]], files[index[i]].name[0] == '#')
kdebug_line ()
kdebug_line ()
root.unlock_ro ()
kdebug_line ()
Kernel::free_slot (slot)
kdebug_line ()
Kernel::my_memory.destroy (caps)
kdebug_line ()
return 0
while true:
Kernel::wait ()
kdebug ("request!\n")

View File

@@ -299,7 +299,6 @@ static void memory_invoke (unsigned cmd, unsigned target, Kernel::Num protected_
kPage *ret = mem->alloc_page ()
if ret:
reply_cap (CAPTYPE_PAGE | CAP_MASTER, (unsigned)ret, &ret->refs)
dbg_log ("page created\n")
else:
dpanic (0x33311992, "out of memory creating page")
reply_num (Kernel::ERR_OUT_OF_MEMORY)
@@ -355,17 +354,18 @@ static void memory_invoke (unsigned cmd, unsigned target, Kernel::Num protected_
dpanic (0x52993341, "Trying to map foreign page")
reply_num (Kernel::ERR_INVALID_ARGUMENT)
return
bool readonly = c->data[1].l & (unsigned)c->arg->target & Kernel::Page::READONLY
mem->map (page, c->data[1].l & PAGE_MASK, readonly)
if c->data[1].l & (unsigned)c->arg->target & Kernel::Page::READONLY:
dbg_log ("Mapping readonly because capability is readonly\n")
page->flags |= Kernel::Page::MAPPED_READONLY
mem->map (page, c->data[1].l & PAGE_MASK)
break
case Kernel::Memory::MAPPING & REQUEST_MASK:
bool readonly
kPage *page = mem->get_mapping (c->data[1].l, &readonly)
kPage *page = mem->get_mapping (c->data[1].l)
if !page:
reply_num (Kernel::ERR_UNMAPPED_READ)
return
unsigned t = CAPTYPE_PAGE | CAP_MASTER
if readonly:
if page->flags & Kernel::Page::MAPPED_READONLY:
t |= Kernel::Page::READONLY
reply_cap (t, (unsigned)page, &page->refs)
return
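
A sketch of the read-only decision above (READONLY = 8 per the header change in this commit): the bit must be present in both the requested address word and the capability's own target word.

static bool map_readonly (unsigned requested, unsigned cap_target) {
	unsigned const READONLY = 8; // from the Kernel::Page flags
	return (requested & cap_target & READONLY) != 0;
}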
@@ -554,14 +554,14 @@ static void thread_invoke (unsigned cmd, unsigned target, Kernel::Num protected_
reply_num (0)
return
static bool page_check_payment (kPage *page):
static void page_check_payment (kPage *page):
kPage *p
for p = page->share_prev; p; p = p->share_prev:
if p->flags & Kernel::Page::PAYING:
return true
return
for p = page->share_next; p; p = p->share_next:
if p->flags & Kernel::Page::PAYING:
return true
return
// No kPage is paying for this frame anymore.
raw_pfree (page->frame)
kPage *next
@@ -577,7 +577,6 @@ static bool page_check_payment (kPage *page):
p->share_next = NULL
p->flags &= ~(Kernel::Page::SHARED | Kernel::Page::FRAME)
kPage_arch_update_mapping (p)
return false
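
page_check_payment walks the share chain in both directions; a standalone sketch of the same scan as a predicate (struct and field names mirror the kPage fields used above):

struct SharedPage {
	unsigned flags;
	SharedPage *share_prev, *share_next;
};

static bool chain_has_payer (SharedPage *page, unsigned PAYING) {
	// The caller already knows page itself stopped paying, so only the
	// neighbours in each direction are scanned.
	for (SharedPage *p = page->share_prev; p; p = p->share_prev)
		if (p->flags & PAYING)
			return true;
	for (SharedPage *p = page->share_next; p; p = p->share_next)
		if (p->flags & PAYING)
			return true;
	return false; // no payer left: the frame can be freed
}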
static void page_invoke (unsigned cmd, unsigned target, Kernel::Num protected_data, kCapability::Context *c):
kPage *page = (kPage *)protected_data.l
@@ -599,9 +598,11 @@ static void page_invoke (unsigned cmd, unsigned target, Kernel::Num protected_da
t->flags |= Kernel::Page::READONLY
if !(page->flags & Kernel::Page::FRAME):
dpanic (0, "sharing nothing results in lost page")
kPage_arch_update_mapping (t)
break
if c->data[0].h & Kernel::Page::COPY:
if ~t->flags & Kernel::Page::PAYING:
kPage_arch_update_mapping (t)
break
if !(c->data[0].h & Kernel::Page::FORGET) || page->flags & Kernel::Page::SHARED:
unsigned *d = (unsigned *)page->frame
@@ -629,10 +630,10 @@ static void page_invoke (unsigned cmd, unsigned target, Kernel::Num protected_da
page->frame = NULL
page->flags &= ~Kernel::Page::FRAME
kPage_arch_update_mapping (page)
kPage_arch_update_mapping (t)
else:
if t == page:
dpanic (0, "sharing page with itself")
kPage_arch_update_mapping (t)
break
if c->data[0].h & Kernel::Page::FORGET:
if ~page->flags & Kernel::Page::SHARED:
@@ -661,7 +662,7 @@ static void page_invoke (unsigned cmd, unsigned target, Kernel::Num protected_da
t->share_prev->share_next = t
t->frame = page->frame
t->flags |= Kernel::Page::FRAME
kPage_arch_update_mapping (t)
kPage_arch_update_mapping (t)
break
case Kernel::Page::SET_FLAGS & REQUEST_MASK:
if cmd & Kernel::Page::READONLY:
@@ -679,8 +680,7 @@ static void page_invoke (unsigned cmd, unsigned target, Kernel::Num protected_da
if ~page->flags & old & Kernel::Page::PAYING:
// Decrease the use counter in any case.
page->address_space->unuse ()
if !page_check_payment (page):
page->flags &= ~Kernel::Page::FRAME
page_check_payment (page)
// If we start paying, increase the use counter.
if page->flags & ~old & Kernel::Page::PAYING:
@@ -694,19 +694,10 @@ static void page_invoke (unsigned cmd, unsigned target, Kernel::Num protected_da
// If we want a frame, see if we can get it.
if ~old & page->flags & Kernel::Page::FRAME:
kPage *p
for p = page; p; p = p->share_prev:
if p->flags & Kernel::Page::PAYING:
break
if !p:
for p = page->share_next; p; p = p->share_next:
if p->flags & Kernel::Page::PAYING:
break
if !p:
dpanic (0, "cannot have frame without payer")
page->flags &= ~Kernel::Page::FRAME
// If we can get the new frame, get it.
if ~old & page->flags & Kernel::Page::FRAME:
if ~page->flags & Kernel::Page::PAYING:
dpanic (0, "cannot have frame without paying")
page->flags &= ~Kernel::Page::FRAME
else:
page->frame = page->address_space->zalloc ()
kPage_arch_update_mapping (page)
break

View File

@@ -447,10 +447,12 @@ namespace Kernel:
FRAME = 4
// A readonly page cannot be written to. This flag cannot be reset while the frame is shared. The flag is already defined in request.
//READONLY = 8
// If this flag is set, the page is or will be mapped read-only.
MAPPED_READONLY = 0x10
// This is a read-only flag, saying if this is physical memory, which mustn't be freed.
PHYSICAL = 0x10
PHYSICAL = 0x20
// This is a read-only flag, saying if this is uncachable memory.
UNCACHED = 0x20
UNCACHED = 0x40
void share (Cap target, unsigned flags):
ocall (target, Kernel::Num (CAP_MASTER_DIRECT | SHARE, flags))
unsigned get_flags ():
@@ -538,9 +540,7 @@ namespace Kernel:
void destroy (Cap target):
ocall (target, CAP_MASTER_DIRECT | DESTROY)
// TODO: LIST
bool map (Cap page, unsigned address, bool readonly = false):
if readonly:
address |= Page::READONLY
bool map (Cap page, unsigned address):
return ocall (page, CAP_MASTER_DIRECT | MAP, address).l != ~0
Page mapping (void *address):
icall (CAP_MASTER_DIRECT | MAPPING, Num ((unsigned)address))
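
For reference, the Page flag layout after this change; the PAYING and SHARED values are assumptions (only FRAME = 4 and READONLY = 8 are visible in this hunk):

enum {
	PAYING          = 0x01, // assumed
	SHARED          = 0x02, // assumed
	FRAME           = 0x04,
	READONLY        = 0x08, // defined with the request bits
	MAPPED_READONLY = 0x10, // new in this commit
	PHYSICAL        = 0x20, // moved up from 0x10
	UNCACHED        = 0x40  // moved up from 0x20
};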

View File

@@ -115,6 +115,8 @@ struct _slot_data:
kThreadP thread
unsigned index
// The order of the members is defined in arch.hh: the first member must be an object which cannot be ~0,
// followed by pc, sp and arch. After that everything is allowed.
struct kThread : public kObject:
kReceiverP receivers
unsigned pc, sp
@@ -167,6 +169,7 @@ struct kReceiver : public kObject:
struct kPage : public kObject:
unsigned frame
unsigned flags
unsigned mapping
kPageP share_first
kPageP share_prev, share_next
kPage_arch arch
@@ -213,9 +216,9 @@ struct kMemory : public kObject:
unsigned limit, used
kMemory_arch arch
inline bool map (kPage *page, unsigned address, bool readonly)
inline void unmap (kPage *page, unsigned address)
inline kPage *get_mapping (unsigned address, bool *readonly)
inline bool map (kPage *page, unsigned address)
inline void unmap (kPage *page)
inline kPage *get_mapping (unsigned address)
// Allocation of pages.
bool use (unsigned num = 1)
@@ -300,20 +303,20 @@ void kThread_arch_receive (kThread *thread, Kernel::Num protected_data, Kernel::
unsigned *kThread_arch_info (kThread *thread, unsigned num)
void kMemory_arch_init (kMemory *mem)
void kMemory_arch_free (kMemory *mem)
bool kMemory_arch_map (kMemory *mem, kPage *page, unsigned address, bool write)
void kMemory_arch_unmap (kMemory *mem, kPage *page, unsigned address)
kPage *kMemory_arch_get_mapping (kMemory *mem, unsigned address, bool *readonly)
bool kMemory_arch_map (kMemory *mem, kPage *page, unsigned address)
void kMemory_arch_unmap (kMemory *mem, kPage *page)
kPage *kMemory_arch_get_mapping (kMemory *mem, unsigned address)
void kPage_arch_update_mapping (kPage *page)
void arch_register_interrupt (unsigned num, kReceiverP r)
void arch_reboot ()
void arch_uncache_page (unsigned page)
bool kMemory::map (kPage *page, unsigned address, bool readonly = false):
return kMemory_arch_map (this, page, address, readonly)
void kMemory::unmap (kPage *page, unsigned address):
kMemory_arch_unmap (this, page, address)
kPage *kMemory::get_mapping (unsigned address, bool *readonly):
return kMemory_arch_get_mapping (this, address, readonly)
bool kMemory::map (kPage *page, unsigned address):
return kMemory_arch_map (this, page, address)
void kMemory::unmap (kPage *page):
kMemory_arch_unmap (this, page)
kPage *kMemory::get_mapping (unsigned address):
return kMemory_arch_get_mapping (this, address)
kCapability *kCapRef::deref ():
return caps ? caps->cap (index) : NULL
void kCapRef::clone (kCapRef source, bool copy):
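
A hypothetical helper (not in the tree) showing the invariant behind the new signatures: a kPage records its own mapping address, so callers never pass one to unmap.

static bool remap (kMemory *mem, kPage *page, unsigned new_address) {
	// kMemory_arch_map unmaps the old address itself when
	// page->mapping != ~0, so no explicit unmap call is needed here.
	return mem->map (page, new_address);
}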

View File

@@ -114,83 +114,31 @@ void kMemory_arch_init (kMemory *mem):
mem->arch.directory = NULL
mem->arch.shadow = NULL
void kMemory_arch_free (kMemory *mem):
//dbg_log_line ()
while mem->arch.first_page_table:
mem->unmap (mem->arch.first_page_table->first_page->page, mem->arch.first_page_table->first_page->mapping)
if (kMemory *)asids[mem->arch.asid] == mem:
flush_tlb (mem->arch.asid)
asids[mem->arch.asid] = asids[0]
asids[0] = mem->arch.asid
mem->unuse ()
mem->zfree ((unsigned)mem->arch.directory)
static arch_page_table *alloc_page_table (kMemory *mem):
//dbg_log_line ()
arch_page_table *ret = (arch_page_table *)mem->search_free (sizeof (arch_page_table), (void **)&mem->arch.first_page_table)
if !ret:
return NULL
ret->first_page = NULL
return ret
static arch_page *alloc_page (kMemory *mem, arch_page_table *t):
//dbg_log_line ()
arch_page *ret = (arch_page *)mem->search_free (sizeof (arch_page), (void **)&t->first_page)
if !ret:
return NULL
ret->page = NULL
ret->mapping = ~0
ret->prev_mapped = NULL
ret->next_mapped = NULL
return ret
static void free_page_table (arch_page_table *t, unsigned idx):
//dbg_log_line ()
kMemory *mem = t->address_space
mem->zfree ((unsigned)mem->arch.directory[idx])
mem->arch.directory[idx] = NULL
mem->arch.shadow[idx] = NULL
mem->free_obj (t, (void **)&mem->arch.first_page_table)
if !mem->arch.first_page_table:
mem->zfree ((unsigned)mem->arch.directory)
mem->zfree ((unsigned)mem->arch.shadow)
mem->arch.directory = NULL
mem->arch.shadow = NULL
static void tlb_reset (unsigned address, unsigned asid, unsigned value):
static void tlb_reset (kMemory *mem, unsigned address, unsigned value):
//dbg_log_line ()
unsigned asid = mem->arch.asid
if asids[asid] != (unsigned)mem:
//dbg_log ("not resetting tlb, because the asid is not in use.\n")
return
//dbg_log ("resetting tlb for ")
//dbg_log_num (address)
//dbg_log ("\n")
cp0_set (CP0_ENTRY_HI, address | asid)
__asm__ volatile ("tlbp")
unsigned idx
cp0_get (CP0_INDEX, idx)
if ~idx & 0x80000000:
__asm__ volatile ("tlbr")
if address & (1 << PAGE_BITS):
cp0_set (CP0_ENTRY_LO1, value)
else:
cp0_set (CP0_ENTRY_LO0, value)
__asm__ volatile ("tlbwi")
static void free_page (arch_page_table *t, arch_page *p):
//dbg_log_line ()
if !p:
dpanic (0, "freeing page 0")
return
if p->prev_mapped:
p->prev_mapped->next_mapped = p->next_mapped
else:
p->page->arch.first_mapped = p->next_mapped
if p->next_mapped:
p->next_mapped->prev_mapped = p->prev_mapped
tlb_reset (p->mapping, p->address_space->arch.asid, 0)
unsigned idx = p->mapping >> 21
p->address_space->free_obj (p, (void **)&t->first_page)
if !t->first_page:
free_page_table (t, idx)
static unsigned make_entry_lo (kPage *page, bool readonly):
static unsigned make_entry_lo (kPage *page):
//dbg_log_line ()
if !page->frame:
dbg_log ("not mapping because there is no frame\n")
//dbg_log ("not mapping because there is no frame\n")
return 0
unsigned flags
if page->flags & Kernel::Page::UNCACHED:
@@ -198,121 +146,108 @@ static unsigned make_entry_lo (kPage *page, bool readonly):
else:
// 0x18 is write-back cache; 0x00 is write-through cache.
flags = 0x18 | 0x2
if !readonly:
if ~page->flags & Kernel::Page::MAPPED_READONLY:
flags |= 0x4
return ((page->frame & ~0x80000000) >> 6) | flags
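
A sketch of the EntryLo encoding make_entry_lo assumes: the physical frame number starts at bit 6, bits 3-5 hold the cache attribute (0x18 = write-back), bit 2 (0x4) is Dirty (write enable), bit 1 (0x2) is Valid; & ~0x80000000 strips the kseg0 base from the frame address.

static unsigned entry_lo (unsigned frame, bool writable) {
	unsigned e = ((frame & ~0x80000000u) >> 6) | 0x18 | 0x2;
	if (writable)
		e |= 0x4; // Dirty bit: the hardware allows stores
	return e;
}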
bool kMemory_arch_map (kMemory *mem, kPage *page, unsigned address, bool readonly):
bool kMemory_arch_map (kMemory *mem, kPage *page, unsigned address):
//dbg_log_line ()
if address >= 0x80000000:
dpanic (address, "trying to map to kernel address")
return false
address &= PAGE_MASK
if page->mapping != ~0:
mem->unmap (page)
if address == ~0:
return true
if address & ~PAGE_MASK:
dpanic (address, "mapping not page-aligned")
address &= PAGE_MASK
if !mem->arch.directory:
mem->arch.directory = (unsigned **)mem->zalloc ()
//dbg_log ("creating directory\n")
mem->arch.directory = (Table **)mem->zalloc ()
if !mem->arch.directory:
dpanic (0, "unable to allocate directory")
return false
mem->arch.shadow = (arch_page_table **)mem->zalloc ()
mem->arch.shadow = (kPageP *)mem->zalloc ()
if !mem->arch.shadow:
dpanic (0, "unable to allocate shadow directory")
mem->zfree ((unsigned)mem->arch.directory)
mem->arch.directory = NULL
return false
unsigned *table = mem->arch.directory[address >> 21]
arch_page_table *t = mem->arch.shadow[address >> 21]
Table *table = mem->arch.directory[address >> 21]
if !table:
table = (unsigned *)mem->zalloc ()
//dbg_log ("creating table\n")
table = (Table *)mem->zalloc ()
if !table:
if !mem->arch.first_page_table:
mem->zfree ((unsigned)mem->arch.directory)
mem->zfree ((unsigned)mem->arch.shadow)
mem->arch.directory = NULL
mem->arch.shadow = NULL
return false
t = alloc_page_table (mem)
if !t:
mem->zfree ((unsigned)table)
if !mem->arch.first_page_table:
mem->zfree ((unsigned)mem->arch.directory)
mem->zfree ((unsigned)mem->arch.shadow)
mem->arch.directory = NULL
mem->arch.shadow = NULL
dpanic (0, "unable to allocate table")
//if mem->arch.first_table == ~0:
// mem->zfree ((unsigned)mem->arch.directory)
// mem->zfree ((unsigned)mem->arch.shadow)
// mem->arch.directory = NULL
// mem->arch.shadow = NULL
return false
mem->arch.directory[address >> 21] = table
mem->arch.shadow[address >> 21] = t
arch_page *p = alloc_page (mem, t)
if !p:
if !t->first_page:
// This automatically cleans up the rest.
free_page_table (t, address >> 21)
return false
unsigned idx = (address >> 12) & ((1 << 9) - 1)
if table[idx]:
if table->entrylo[idx]:
dbg_log ("page already mapped: ")
dbg_log_num (idx, 3)
dbg_log (";")
dbg_log_num (table[idx])
dbg_log_num (table->entrylo[idx])
dbg_log ("/")
dbg_log_num (table[idx + 0x200])
dbg_log_num ((unsigned)table->page[idx])
dbg_log (" table: ")
dbg_log_num ((unsigned)table)
dbg_log ("\n")
mem->unmap ((kPage *)table[idx + 0x200], address)
table[idx] = make_entry_lo (page, readonly)
table[idx + 0x200] = (unsigned)p
dbg_log ("mapped at address ")
dbg_log_num (address)
dbg_log_char ('\n')
p->mapping = address + readonly
p->page = page
p->next_mapped = page->arch.first_mapped
if p->next_mapped:
p->next_mapped->prev_mapped = p
page->arch.first_mapped = p
mem->unmap (table->page[idx])
table->entrylo[idx] = make_entry_lo (page)
table->page[idx] = page
//dbg_log ("mapped at address ")
//dbg_log_num (address)
//dbg_log_char ('\n')
page->mapping = address
page->arch.next_mapped = mem->arch.shadow[address >> 21]
if page->arch.next_mapped:
page->arch.next_mapped->arch.prev_mapped = page
mem->arch.shadow[address >> 21] = page
tlb_reset (mem, address, table->entrylo[idx])
return true
void kMemory_arch_unmap (kMemory *mem, kPage *page, unsigned address):
void kMemory_arch_unmap (kMemory *mem, kPage *page):
//dbg_log_line ()
unsigned didx = address >> 21
unsigned tidx = (address >> 12) & ((1 << 9) - 1)
unsigned *table = mem->arch.directory[didx]
arch_page_table *t = mem->arch.shadow[didx]
table[tidx] = 0
arch_page *p = (arch_page *)table[tidx + 0x200]
table[tidx + 0x200] = 0
free_page (t, p)
unsigned didx = page->mapping >> 21
unsigned tidx = (page->mapping >> 12) & ((1 << 9) - 1)
Table *table = mem->arch.directory[didx]
table->entrylo[tidx] = 0
table->page[tidx] = NULL
if page->arch.next_mapped:
page->arch.next_mapped->arch.prev_mapped = page->arch.prev_mapped
if page->arch.prev_mapped:
page->arch.prev_mapped->arch.next_mapped = page->arch.next_mapped
else:
mem->arch.shadow[didx] = page->arch.next_mapped
page->arch.prev_mapped = NULL
page->arch.next_mapped = NULL
tlb_reset (mem, page->mapping, 0)
page->mapping = ~0
kPage *kMemory_arch_get_mapping (kMemory *mem, unsigned address, bool *readonly):
kPage *kMemory_arch_get_mapping (kMemory *mem, unsigned address):
//dbg_log_line ()
if address >= 0x80000000 || !mem->arch.directory:
return NULL
unsigned *table = mem->arch.directory[address >> 21]
Table *table = mem->arch.directory[address >> 21]
if !table:
return NULL
unsigned idx = (address >> 12) & ((1 << 9) - 1)
arch_page *page = (arch_page *)table[idx + 0x200]
if !page:
return NULL
if readonly:
*readonly = !(table[idx] & 4)
return page->page
return table->page[(address >> 12) & ((1 << 9) - 1)]
void kPage_arch_update_mapping (kPage *page):
//dbg_log_line ()
if !page->arch.first_mapped:
if page->mapping == ~0:
return
kMemory *as = page->address_space
unsigned target = make_entry_lo (page, page->flags & Kernel::Page::READONLY)
for arch_page *p = page->arch.first_mapped; p; p = p->next_mapped:
unsigned de = p->mapping >> 21
unsigned te = (p->mapping >> 12) & ((1 << 9) - 1)
bool readonly = p->mapping & 1
unsigned t
if readonly:
t = target & ~0x4
else:
t = target
as->arch.directory[de][te] = t
tlb_reset (p->mapping & ~1, as->arch.asid, t)
unsigned target = make_entry_lo (page)
unsigned de = page->mapping >> 21
unsigned te = (page->mapping >> 12) & ((1 << 9) - 1)
page->address_space->arch.directory[de]->entrylo[te] = target
tlb_reset (page->address_space, page->mapping, target)
typedef unsigned cacheline[8]
void arch_uncache_page (unsigned page):

View File

@@ -93,32 +93,29 @@ struct kThread_arch:
unsigned at, v[2], a[4], t[10], s[8], gp, fp, ra, hi, lo, k[2]
// The following is used for page mapping.
// Each Memory has two directories with 0x400 entries,
// page tables and mapping page tables. Mapping page tables are
// pages which contain 0x200 EntryLo. values and 0x200 Page pointers.
// For a virtual address, bits 0-11 are in the physical address,
// bits 12-20 are an index in the page table, bits 21-30
// are an index in the page directory and bit 31 is always 0.
struct arch_page : public kObject :
kPage *page
unsigned mapping
arch_page *prev_mapped, *next_mapped
struct arch_page_table : public kObject :
arch_page *first_page
// Each Memory has a directory with 0x400 page tables.
// Page tables are pages which contain 0x200 EntryLo. values and 0x200
// kPage pointers.
// For a virtual address, bits 0-11 are in the physical address, bits 12-20 are
// an index in the page table, bits 21-30 are an index in the page directory
// and bit 31 is always 0.
struct kPage_arch:
arch_page *first_mapped
kPageP prev_mapped, next_mapped
struct Table:
unsigned entrylo[0x200]
kPage *page[0x200]
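
A worked sketch of the address split described in the comment above (bits 12-20 index the page table, bits 21-30 the directory):

static void split_vaddr (unsigned vaddr, unsigned *dir_idx, unsigned *tbl_idx, unsigned *offset) {
	*dir_idx = vaddr >> 21;            // one of 0x400 directory slots (bit 31 is 0)
	*tbl_idx = (vaddr >> 12) & 0x1ff;  // one of 0x200 table slots
	*offset  = vaddr & 0xfff;          // byte within the 4 KiB page
}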
struct kMemory_arch:
unsigned asid
unsigned **directory
arch_page_table **shadow
arch_page_table *first_page_table
Table **directory
kPageP *shadow
unsigned first_table
// Each entry points to a kMemory when its asid is taken; when free, it holds the index of the next free asid, or 0.
// asid[0] is used as index to first free asid.
// asid value 0 is only used by the idle task.
EXTERN unsigned asids[64]
EXTERN kReceiverP arch_interrupt_receiver[32]
@@ -143,7 +140,7 @@ extern "C":
extern unsigned thread_start[NUM_THREADS + 1]
#endif
// Fast pointer to page directory, for tlb miss events
extern unsigned **directory
extern Table **directory
#endif // not defined ASM

View File

@@ -44,7 +44,7 @@ static void init_idle ():
idle_memory.memories = NULL
idle_memory.limit = 0
idle_memory.used = 0
idle_memory.arch.directory = (unsigned **)0x80000000
idle_memory.arch.directory = (Table **)0x80000000
// Shadow is never used for the idle task.
idle_memory.arch.shadow = NULL
idle_memory.arch.asid = 0
@@ -125,7 +125,6 @@ static void init_threads ():
#ifndef NDEBUG
thread->id = i
#endif
kPage **pages = (kPage **)mem->zalloc ()
Elf32_Ehdr *header = (Elf32_Ehdr *)thread_start[i]
for unsigned j = 0; j < SELFMAG; ++j:
if header->e_ident[j] != ELFMAG[j]:
@@ -148,6 +147,7 @@ static void init_threads ():
return
thread->pc = header->e_entry
thread->sp = 0x80000000
unsigned *used = (unsigned *)mem->zalloc ()
for unsigned section = 0; section < header->e_shnum; ++section:
Elf32_Shdr *shdr = (Elf32_Shdr *)(thread_start[i] + header->e_shoff + section * header->e_shentsize)
if ~shdr->sh_flags & SHF_ALLOC:
@@ -162,30 +162,35 @@ static void init_threads ():
for unsigned p = (shdr->sh_addr & PAGE_MASK); p < shdr->sh_addr + shdr->sh_size; p += PAGE_SIZE:
unsigned section_offset = (p - (shdr->sh_addr & PAGE_MASK)) >> PAGE_BITS
unsigned idx = file_offset + section_offset
kPage *page = mem->get_mapping (p, &readonly)
kPage *page = mem->get_mapping (p)
if page:
if !pages[idx]:
panic (0, "multiple pages mapped to one address in initial file")
return
if pages[idx]->frame != page->frame:
if page->frame != thread_start[i] + (idx << PAGE_BITS):
panic (0, "different pages mapped to one address in intitial file")
return
continue
if !pages[idx]:
pages[idx] = mem->alloc_page ()
pages[idx]->frame = thread_start[i] + (idx << PAGE_BITS)
pages[idx]->flags = Kernel::Page::PAYING | Kernel::Page::FRAME
++top_memory.limit
mem->use ()
if !mem->map (pages[idx], p, readonly):
used[idx] = 1
page = mem->alloc_page ()
page->frame = thread_start[i] + (idx << PAGE_BITS)
page->flags = Kernel::Page::PAYING | Kernel::Page::FRAME
if readonly:
page->flags |= Kernel::Page::MAPPED_READONLY
if !mem->map (page, p):
panic (0x22446688, "unable to map initial page")
return
//dbg_log ("mapped page ")
//if readonly:
// dbg_log ("as readonly ")
//dbg_log ("at address ")
//dbg_log_num (p)
//dbg_log (" for ")
//dbg_log_num (i, 1)
//dbg_log_char ('\n')
else:
if readonly:
panic (0x33399993, "unwritable bss section")
return
for unsigned p = (shdr->sh_addr & PAGE_MASK); p < shdr->sh_addr + shdr->sh_size; p += PAGE_SIZE:
kPage *page = mem->get_mapping (p, &readonly)
kPage *page = mem->get_mapping (p)
if !page:
page = mem->alloc_page ()
if !page:
@@ -199,8 +204,13 @@ static void init_threads ():
if !mem->map (page, p):
panic (0x33557799, "unable to map initial bss page")
return
dbg_log ("mapped bss page at address ")
dbg_log_num (p)
dbg_log (" for ")
dbg_log_num (i, 1)
dbg_log_char ('\n')
else:
if readonly:
if page->flags & Kernel::Page::MAPPED_READONLY:
panic (0x20203030, "bss section starts on read-only page")
return
for unsigned a = p; a < ((p + PAGE_SIZE) & PAGE_MASK); a += 4:
@@ -210,10 +220,12 @@ static void init_threads ():
continue
((unsigned *)page->frame)[(a & ~PAGE_MASK) >> 2] = 0
for unsigned p = 0; p <= ((thread_start[i + 1] - thread_start[i] - 1) >> PAGE_BITS); ++p:
if pages[p]:
continue
++top_memory.limit
if used[p]:
mem->use ()
continue
top_memory.pfree (thread_start[i] + (p << PAGE_BITS))
mem->pfree ((unsigned)used)
kPage *stackpage = mem->alloc_page ()
stackpage->frame = mem->zalloc ()
stackpage->flags = Kernel::Page::PAYING | Kernel::Page::FRAME
@@ -242,7 +254,6 @@ static void init_threads ():
thread->schedule_prev = previous
thread->schedule_next = NULL
previous = thread
mem->pfree ((unsigned)pages)
// Initialize the kernel, finish by falling into the idle task.
void init (unsigned mem):

View File

@@ -73,7 +73,7 @@ kThread *tlb_refill ():
return current
unsigned EntryHi
cp0_get (CP0_ENTRY_HI, EntryHi)
unsigned *t = directory[EntryHi >> 21]
Table *t = directory[EntryHi >> 21]
if !t:
unsigned addr
cp0_get (CP0_BAD_V_ADDR, addr)
@@ -81,9 +81,16 @@ kThread *tlb_refill ():
else:
// - 2 instead of - 1 means reset bit 0
unsigned idx = (EntryHi >> 12) & ((1 << 9) - 2)
cp0_set (CP0_ENTRY_LO0, t[idx])
cp0_set (CP0_ENTRY_LO1, t[idx + 1])
cp0_set (CP0_ENTRY_LO0, t->entrylo[idx])
cp0_set (CP0_ENTRY_LO1, t->entrylo[idx + 1])
__asm__ volatile ("tlbwr")
//dbg_log ("tlb refill ")
//dbg_log_num (t->entrylo[idx])
//dbg_log (":")
//dbg_log_num (t->entrylo[idx + 1])
//dbg_log (" for ")
//dbg_log_num (EntryHi)
//dbg_log ("\n")
handle_exit ()
return current
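
A sketch of the refill index math ("- 2 instead of - 1" above): ((1 << 9) - 2) = 0x1fe clears bit 0, so idx is always even and idx/idx + 1 fetch the EntryLo0/EntryLo1 halves of one TLB line.

static unsigned pair_base (unsigned entry_hi) {
	return (entry_hi >> 12) & ((1u << 9) - 2); // even index into the table
}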

View File

@@ -46,6 +46,29 @@ void dbg_log_num (unsigned num, unsigned digits):
#if 1 || defined (NDEBUG)
static void panic_message (unsigned n, const char *line, char const *name, char const *message):
//unsigned addr
//cp0_get (CP0_BAD_V_ADDR, addr)
//unsigned de = addr >> 21
//unsigned te = (addr >> 12) & ((1 << 9) - 1)
//dbg_log_num ((unsigned)old_current->address_space->arch.directory[de]->page[te])
//dbg_log (":")
//dbg_log_num (old_current->address_space->arch.directory[de]->entrylo[te])
//dbg_log ("\n")
//__asm__ volatile ("tlbp")
//unsigned idx, hi
//cp0_get (CP0_INDEX, idx)
//dbg_log_num (idx)
//dbg_log (":")
//cp0_get (CP0_ENTRY_HI, hi)
//dbg_log_num (hi)
//dbg_log (":")
//unsigned lo
//cp0_get (CP0_ENTRY_LO0, lo)
//dbg_log_num (lo)
//dbg_log (":")
//cp0_get (CP0_ENTRY_LO1, lo)
//dbg_log_num (lo)
//dbg_log ("\n")
dbg_log ("Panic: caller = ")
if old_current:
dbg_log_num (old_current->id, 2)