1
0
mirror of git://projects.qi-hardware.com/iris.git synced 2024-12-29 20:04:34 +02:00
iris/mips/arch.ccp
2009-12-30 22:41:45 +01:00

325 lines
9.2 KiB
C++ (pypp-preprocessed)

#pypp 0
// Iris: micro-kernel for a capability-based operating system.
// mips/arch.ccp: Most mips-specific parts.
// Copyright 2009 Bas Wijnen <wijnen@debian.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
#define ARCH
#include "kernel.hh"
void kThread_arch_init (kThread *thread):
thread->arch.at = 0
for unsigned i = 0; i < 2; ++i:
thread->arch.v[i] = 0
thread->arch.k[i] = 0
for unsigned i = 0; i < 4; ++i:
thread->arch.a[i] = 0
for unsigned i = 0; i < 10; ++i:
thread->arch.t[i] = 0
thread->arch.gp = 0
thread->arch.fp = 0
thread->arch.ra = 0
thread->arch.hi = 0
thread->arch.lo = 0
void kThread_arch_receive (kThread *thread, Kernel::Num protected_data, Kernel::Num *data):
	// Deliver a message into the thread's registers: t0/t1 carry the
	// protected (capability) data, a0-a3 carry the two message words.
	thread->arch.t[0] = protected_data.l
	thread->arch.t[1] = protected_data.h
	thread->arch.a[0] = data[0].l
	thread->arch.a[1] = data[0].h
	thread->arch.a[2] = data[1].l
	thread->arch.a[3] = data[1].h
unsigned *kThread_arch_info (kThread *thread, unsigned num):
	// Return the address of the saved slot for mips register number num
	// (1-31); NULL for 0 ($zero is not stored) and for out-of-range
	// numbers.  Note that 29 ($sp) lives in the generic thread state,
	// not in the arch part; k0/k1 (26/27) are saved per thread as well.
	switch num:
		case 1:
			return &thread->arch.at
		case 2:
			return &thread->arch.v[0]
		case 3:
			return &thread->arch.v[1]
		case 4:
			return &thread->arch.a[0]
		case 5:
			return &thread->arch.a[1]
		case 6:
			return &thread->arch.a[2]
		case 7:
			return &thread->arch.a[3]
		case 8:
			return &thread->arch.t[0]
		case 9:
			return &thread->arch.t[1]
		case 10:
			return &thread->arch.t[2]
		case 11:
			return &thread->arch.t[3]
		case 12:
			return &thread->arch.t[4]
		case 13:
			return &thread->arch.t[5]
		case 14:
			return &thread->arch.t[6]
		case 15:
			return &thread->arch.t[7]
		case 16:
			return &thread->arch.s[0]
		case 17:
			return &thread->arch.s[1]
		case 18:
			return &thread->arch.s[2]
		case 19:
			return &thread->arch.s[3]
		case 20:
			return &thread->arch.s[4]
		case 21:
			return &thread->arch.s[5]
		case 22:
			return &thread->arch.s[6]
		case 23:
			return &thread->arch.s[7]
		case 24:
			// t8/t9 are register numbers 24/25, stored after t0-t7.
			return &thread->arch.t[8]
		case 25:
			return &thread->arch.t[9]
		case 26:
			return &thread->arch.k[0]
		case 27:
			return &thread->arch.k[1]
		case 28:
			return &thread->arch.gp
		case 29:
			// The stack pointer is kept in the generic thread struct.
			return &thread->sp
		case 30:
			return &thread->arch.fp
		case 31:
			return &thread->arch.ra
		default:
			return NULL
void kMemory_arch_init (kMemory *mem):
	// Set up the architecture-specific part of a new address space.
	// NOTE(review): asid 1 looks like a placeholder until a real asid is
	// assigned; asid 0 appears to head the free list (see
	// kMemory_arch_free) — confirm against the asid allocator.
	mem->arch.asid = 1
	mem->arch.directory = NULL
	mem->arch.shadow = NULL
	// Explicitly clear the page table list.  kMemory_arch_free and
	// kMemory_arch_map walk this pointer; previously it was never
	// initialized here and relied on the kMemory allocator zeroing the
	// object, which is an easy invariant to break.
	mem->arch.first_page_table = NULL
void kMemory_arch_free (kMemory *mem):
	//dbg_log_line ()
	// Tear down the architecture part of an address space.  Unmapping a
	// page table's last page frees the table (see free_page /
	// free_page_table), so this loop drains first_page_table completely.
	while mem->arch.first_page_table:
		mem->unmap (mem->arch.first_page_table->first_page->page, mem->arch.first_page_table->first_page->mapping)
	// If this address space still owns its asid, flush its tlb entries
	// and push the asid back on the free list (asids[0] is the head).
	if (kMemory *)asids[mem->arch.asid] == mem:
		flush_tlb (mem->arch.asid)
		asids[mem->arch.asid] = asids[0]
		asids[0] = mem->arch.asid
	mem->unuse ()
	// NOTE(review): if the loop above freed the last page table,
	// free_page_table already released the directory and set it to NULL,
	// so this frees NULL — confirm zfree treats that as a no-op.
	mem->zfree ((unsigned)mem->arch.directory)
static arch_page_table *alloc_page_table (kMemory *mem):
	//dbg_log_line ()
	// Allocate a shadow page-table record for this address space and
	// link it into mem->arch.first_page_table; NULL when out of memory.
	arch_page_table *table = (arch_page_table *)mem->search_free (sizeof (arch_page_table), (void **)&mem->arch.first_page_table)
	if table:
		table->first_page = NULL
	return table
static arch_page *alloc_page (kMemory *mem, arch_page_table *t):
	//dbg_log_line ()
	// Allocate one mapping record in table t and put it in a known-empty
	// state; NULL when out of memory.
	arch_page *p = (arch_page *)mem->search_free (sizeof (arch_page), (void **)&t->first_page)
	if p:
		p->page = NULL
		p->mapping = ~0
		p->prev_mapped = NULL
		p->next_mapped = NULL
	return p
static void free_page_table (arch_page_table *t, unsigned idx):
	//dbg_log_line ()
	// Release the hardware page table at directory slot idx together
	// with its shadow record t.  Called when the table's last mapping
	// has been removed.
	kMemory *mem = t->address_space
	mem->zfree ((unsigned)mem->arch.directory[idx])
	mem->arch.directory[idx] = NULL
	mem->arch.shadow[idx] = NULL
	mem->free_obj (t, (void **)&mem->arch.first_page_table)
	// When no page tables remain at all, the directory and its shadow
	// can be freed as well.
	if !mem->arch.first_page_table:
		mem->zfree ((unsigned)mem->arch.directory)
		mem->zfree ((unsigned)mem->arch.shadow)
		mem->arch.directory = NULL
		mem->arch.shadow = NULL
static void tlb_reset (unsigned address, unsigned asid, unsigned value):
	//dbg_log_line ()
	// Rewrite the EntryLo word for (address, asid) in the tlb, if that
	// translation is currently cached there.  value == 0 invalidates;
	// any other value updates the mapping in place.
	cp0_set (CP0_ENTRY_HI, address | asid)
	// Probe: loads CP0 Index with the matching entry, or sets its top
	// bit when there is no match.
	__asm__ volatile ("tlbp")
	unsigned idx
	cp0_get (CP0_INDEX, idx)
	// Only touch the tlb when the probe found a matching entry.
	if ~idx & 0x80000000:
		// Each tlb entry maps an even/odd pair of virtual pages;
		// select the half this address falls in.
		if address & (1 << PAGE_BITS):
			cp0_set (CP0_ENTRY_LO1, value)
		else:
			cp0_set (CP0_ENTRY_LO0, value)
		// Write the modified entry back at the probed index.
		__asm__ volatile ("tlbwi")
static void free_page (arch_page_table *t, arch_page *p):
	//dbg_log_line ()
	// Remove mapping record p: unlink it from its page's mapped-list,
	// purge the translation from the tlb and free the record.  Frees
	// the whole page table when this was its last mapping.
	if !p:
		dpanic (0, "freeing page 0")
		return
	// Unlink from the doubly linked list rooted at
	// p->page->arch.first_mapped.
	if p->prev_mapped:
		p->prev_mapped->next_mapped = p->next_mapped
	else:
		p->page->arch.first_mapped = p->next_mapped
	if p->next_mapped:
		p->next_mapped->prev_mapped = p->prev_mapped
	// Invalidate any cached translation for this mapping.
	tlb_reset (p->mapping, p->address_space->arch.asid, 0)
	// Remember the directory slot before p is freed.
	unsigned idx = p->mapping >> 21
	p->address_space->free_obj (p, (void **)&t->first_page)
	if !t->first_page:
		free_page_table (t, idx)
static unsigned make_entry_lo (kPage *page, bool readonly):
	//dbg_log_line ()
	// Build a mips EntryLo value for this page: pfn | cache attribute |
	// D (write-enable) | V (valid).  Pages without a physical frame get
	// an all-zero (invalid) entry, so accesses fault.
	if !page->frame:
		return 0
	unsigned flags
	if page->flags & Kernel::Page::UNCACHED:
		// 0x10: cache attribute 2 (uncached) in bits 5:3; 0x2: V bit.
		flags = 0x10 | 0x2
	else:
		// 18 is write-back cache; 00 is write-through cache.
		flags = 0x18 | 0x2
	if !readonly:
		// 0x4: D (dirty) bit; without it, writes to the page trap.
		flags |= 0x4
	// Strip the kseg0 bit to get the physical address; the pfn field
	// starts at bit 6, so physical >> 12 << 6 == physical >> 6.
	return ((page->frame & ~0x80000000) >> 6) | flags
bool kMemory_arch_map (kMemory *mem, kPage *page, unsigned address, bool readonly):
	//dbg_log_line ()
	// Map page at virtual address in mem.  Layout: directory[addr >> 21]
	// points to a table covering 2 MB whose first 0x200 words are
	// hardware EntryLo values and whose next 0x200 words shadow them
	// with arch_page pointers.  Returns false on allocation failure.
	if address >= 0x80000000:
		panic (0x32134293, "trying to map to kernel address")
		return false
	address &= PAGE_MASK
	// Create the top-level directory plus its shadow on first use.
	if !mem->arch.directory:
		mem->arch.directory = (unsigned **)mem->zalloc ()
		if !mem->arch.directory:
			return false
		mem->arch.shadow = (arch_page_table **)mem->zalloc ()
		if !mem->arch.shadow:
			mem->zfree ((unsigned)mem->arch.directory)
			mem->arch.directory = NULL
			return false
	unsigned *table = mem->arch.directory[address >> 21]
	arch_page_table *t = mem->arch.shadow[address >> 21]
	if !table:
		// No second-level table yet for this 2 MB region: allocate the
		// hardware table and its shadow record.  On failure, also undo
		// a directory that this call just created (it is empty exactly
		// when first_page_table is empty).
		table = (unsigned *)mem->zalloc ()
		if !table:
			if !mem->arch.first_page_table:
				mem->zfree ((unsigned)mem->arch.directory)
				mem->zfree ((unsigned)mem->arch.shadow)
				mem->arch.directory = NULL
				mem->arch.shadow = NULL
			return false
		t = alloc_page_table (mem)
		if !t:
			mem->zfree ((unsigned)table)
			if !mem->arch.first_page_table:
				mem->zfree ((unsigned)mem->arch.directory)
				mem->zfree ((unsigned)mem->arch.shadow)
				mem->arch.directory = NULL
				mem->arch.shadow = NULL
			return false
		mem->arch.directory[address >> 21] = table
		mem->arch.shadow[address >> 21] = t
	arch_page *p = alloc_page (mem, t)
	if !p:
		if !t->first_page:
			// This automatically cleans up the rest.
			free_page_table (t, address >> 21)
		return false
	unsigned idx = (address >> 12) & ((1 << 9) - 1)
	// Slot already in use: log it and unmap the old page first.
	if table[idx]:
		dbg_log ("page already mapped: ")
		dbg_log_num (idx, 3)
		dbg_log (";")
		dbg_log_num (table[idx])
		dbg_log ("/")
		dbg_log_num (table[idx + 0x200])
		dbg_log (" table: ")
		dbg_log_num ((unsigned)table)
		dbg_log ("\n")
		// NOTE(review): the shadow slot holds an arch_page *, which is
		// passed here as kPage *; kMemory_arch_unmap ignores its page
		// argument, but confirm mem->unmap does too.
		mem->unmap ((kPage *)table[idx + 0x200], address)
	table[idx] = make_entry_lo (page, readonly)
	table[idx + 0x200] = (unsigned)p
	// The low bit of mapping stores the readonly flag; addresses are
	// page-aligned so the bit is otherwise unused.
	p->mapping = address + readonly
	p->page = page
	// Link the new record at the head of the page's mapping list.
	p->next_mapped = page->arch.first_mapped
	if p->next_mapped:
		p->next_mapped->prev_mapped = p
	page->arch.first_mapped = p
	return true
void kMemory_arch_unmap (kMemory *mem, kPage *page, unsigned address):
	//dbg_log_line ()
	// Remove the mapping at address.  The page argument is unused; the
	// record to free is found through the shadow table.
	// NOTE(review): no null checks — this assumes address is currently
	// mapped in mem; confirm all callers guarantee that.
	unsigned didx = address >> 21
	unsigned tidx = (address >> 12) & ((1 << 9) - 1)
	unsigned *table = mem->arch.directory[didx]
	arch_page_table *t = mem->arch.shadow[didx]
	// Clear the hardware entry and its shadow slot, then release the
	// mapping record (which also flushes the tlb; see free_page).
	table[tidx] = 0
	arch_page *p = (arch_page *)table[tidx + 0x200]
	table[tidx + 0x200] = 0
	free_page (t, p)
kPage *kMemory_arch_get_mapping (kMemory *mem, unsigned address, bool *readonly):
	//dbg_log_line ()
	// Look up the page mapped at address; NULL when nothing is mapped
	// there.  When readonly is non-NULL, it receives whether the
	// mapping lacks the write-enable bit.
	if address >= 0x80000000:
		return NULL
	if !mem->arch.directory:
		return NULL
	unsigned *table = mem->arch.directory[address >> 21]
	if !table:
		return NULL
	unsigned entry = (address >> 12) & ((1 << 9) - 1)
	arch_page *p = (arch_page *)table[entry + 0x200]
	if !p:
		return NULL
	if readonly:
		*readonly = !(table[entry] & 4)
	return p->page
void kPage_arch_update_mapping (kPage *page):
	//dbg_log_line ()
	// Propagate a change of page->frame or page->flags to every mapping
	// of this page: rewrite the page table entries and refresh the tlb.
	if !page->arch.first_mapped:
		return
	// NOTE(review): all mappings are updated through page->address_space
	// — this assumes every mapping of the page lives in that one address
	// space; confirm.
	kMemory *as = page->address_space
	// Template EntryLo derived from the page's own readonly flag.
	unsigned target = make_entry_lo (page, page->flags & Kernel::Page::READONLY)
	for arch_page *p = page->arch.first_mapped; p; p = p->next_mapped:
		unsigned de = p->mapping >> 21
		unsigned te = (p->mapping >> 12) & ((1 << 9) - 1)
		// The low mapping bit records a per-mapping readonly override
		// (set by kMemory_arch_map).
		bool readonly = p->mapping & 1
		unsigned t
		if readonly:
			// Drop the D (write-enable) bit for readonly mappings.
			t = target & ~0x4
		else:
			t = target
		as->arch.directory[de][te] = t
		// Strip the readonly bit to recover the mapped address.
		tlb_reset (p->mapping & ~1, as->arch.asid, t)
typedef unsigned cacheline[8]
// Push one page (PAGE_SIZE bytes) out of the caches, one 32-byte line
// at a time.
void arch_uncache_page (unsigned page):
	for cacheline *line = (cacheline *)page; line < (cacheline *)(page + PAGE_SIZE); ++line:
		// NOTE(review): cache ops 0x10/0x11 look like hit-invalidate on
		// the I- and D-cache — confirm the op encodings against the
		// target CPU's manual.
		__asm__ volatile ("lw $k0, %0; cache 0x10, 0($k0); cache 0x11, 0($k0)" :: "m"(line))
void arch_register_interrupt (unsigned num, kReceiver *r):
	// Attach a receiver to hardware interrupt num (or detach with
	// r == NULL), masking the irq at the interrupt controller whenever
	// no receiver is attached.
	arch_interrupt_receiver[num] = r
	if !r:
		intc_mask_irq (num)
	else:
		intc_unmask_irq (num)