#pypp 0
// Iris: micro-kernel for a capability-based operating system.
// mips/arch.ccp: Most mips-specific parts.
// Copyright 2009 Bas Wijnen <wijnen@debian.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
#define ARCH
#include "kernel.hh"
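// Clear the saved register state of a freshly created thread.  The s
// registers and sp are not touched here; presumably they are set up
// elsewhere when the thread is actually started.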
void kThread_arch_init (kThread *thread):
	thread->arch.at = 0
	for unsigned i = 0; i < 2; ++i:
		thread->arch.v[i] = 0
		thread->arch.k[i] = 0
	for unsigned i = 0; i < 4; ++i:
		thread->arch.a[i] = 0
	for unsigned i = 0; i < 10; ++i:
		thread->arch.t[i] = 0
	thread->arch.gp = 0
	thread->arch.fp = 0
	thread->arch.ra = 0
	thread->arch.hi = 0
	thread->arch.lo = 0
void kPage_arch_init (kPage *page):
	page->arch.prev_mapped = NULL
	page->arch.next_mapped = NULL
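// Note: judging from the assignments below, an incoming message's two data
// words are handed to the thread in a0-a3 (low and high half of each
// Iris::Num) and the capability's protected data in t0-t1, so user code can
// presumably read the transfer directly from those registers when it resumes.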
void kThread_arch_receive (kThread *thread, Iris::Num protected_data, Iris::Num *data):
	thread->arch.a[0] = data[0].l
	thread->arch.a[1] = data[0].h
	thread->arch.a[2] = data[1].l
	thread->arch.a[3] = data[1].h
	thread->arch.t[0] = protected_data.l
	thread->arch.t[1] = protected_data.h
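// Map a MIPS GPR number (1-31) to the saved slot for this thread, following
// the standard register numbering: 1=at, 2-3=v0/v1, 4-7=a0-a3, 8-15 and
// 24-25=t0-t9, 16-23=s0-s7, 26-27=k0/k1, 28=gp, 29=sp, 30=fp, 31=ra.
// Register 0 is the hardwired zero register and has no storage, so it falls
// through to NULL like any other unknown number.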
unsigned *kThread_arch_info (kThread *thread, unsigned num):
	switch num:
		case 1:
			return &thread->arch.at
		case 2:
			return &thread->arch.v[0]
		case 3:
			return &thread->arch.v[1]
		case 4:
			return &thread->arch.a[0]
		case 5:
			return &thread->arch.a[1]
		case 6:
			return &thread->arch.a[2]
		case 7:
			return &thread->arch.a[3]
		case 8:
			return &thread->arch.t[0]
		case 9:
			return &thread->arch.t[1]
		case 10:
			return &thread->arch.t[2]
		case 11:
			return &thread->arch.t[3]
		case 12:
			return &thread->arch.t[4]
		case 13:
			return &thread->arch.t[5]
		case 14:
			return &thread->arch.t[6]
		case 15:
			return &thread->arch.t[7]
		case 16:
			return &thread->arch.s[0]
		case 17:
			return &thread->arch.s[1]
		case 18:
			return &thread->arch.s[2]
		case 19:
			return &thread->arch.s[3]
		case 20:
			return &thread->arch.s[4]
		case 21:
			return &thread->arch.s[5]
		case 22:
			return &thread->arch.s[6]
		case 23:
			return &thread->arch.s[7]
		case 24:
			return &thread->arch.t[8]
		case 25:
			return &thread->arch.t[9]
		case 26:
			return &thread->arch.k[0]
		case 27:
			return &thread->arch.k[1]
		case 28:
			return &thread->arch.gp
		case 29:
			return &thread->sp
		case 30:
			return &thread->arch.fp
		case 31:
			return &thread->arch.ra
		default:
			return NULL
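// A fresh address space starts with asid 1; the page directory and its shadow
// are only allocated when the first page is actually mapped (see
// kMemory_arch_map below).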
void kMemory_arch_init (kMemory *mem):
	mem->arch.asid = 1
	mem->arch.directory = NULL
	mem->arch.shadow = NULL
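// Update (or invalidate) any cached TLB entry for this virtual address.  If
// the asid is not currently assigned to this address space, the TLB cannot
// hold a stale entry for it and nothing needs to be done.  Otherwise this is
// the usual MIPS32 probe/rewrite sequence: load EntryHi with the address and
// asid, probe with tlbp, and if the probe hit (bit 31 of Index, the P bit, is
// clear) read the entry, replace EntryLo0 or EntryLo1 depending on whether
// the page is the even or odd half of the pair, and write it back with tlbwi.
// EntryHi is restored to the running address space's asid afterwards.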
static void tlb_reset (kMemory *mem, unsigned address, unsigned value):
	//kdebug_line ()
	unsigned asid = mem->arch.asid
	if asids[asid] != (unsigned)mem:
		//kdebug ("not resetting tlb, because the asid is not in use.\n")
		return
	//kdebug ("resetting tlb for ")
	//kdebug_num (address)
	//kdebug ("\n")
	cp0_set (CP0_ENTRY_HI, address | asid)
	__asm__ volatile ("tlbp")
	unsigned idx
	cp0_get (CP0_INDEX, idx)
	if !(idx & (1 << 31)):
		__asm__ volatile ("tlbr")
		if address & (1 << PAGE_BITS):
			cp0_set (CP0_ENTRY_LO1, value)
		else:
			cp0_set (CP0_ENTRY_LO0, value)
		__asm__ volatile ("tlbwi")
		#if 0
		kdebug ("tlb reset ")
		unsigned hi, lo0, lo1
		cp0_get (CP0_ENTRY_LO0, lo0)
		cp0_get (CP0_ENTRY_LO1, lo1)
		cp0_get (CP0_ENTRY_HI, hi)
		kdebug_num (idx, 2)
		kdebug ('|')
		kdebug_num (lo0)
		kdebug (":")
		kdebug_num (lo1)
		kdebug (" for ")
		kdebug_num (hi)
		kdebug ("\n")
		#endif
	cp0_set (CP0_ENTRY_HI, old_current->address_space->arch.asid)
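// Build a MIPS32 EntryLo value for a page: the physical frame number in the
// upper bits plus 0x2 (V, valid), 0x4 (D, writable) unless the mapping is
// read-only, and the cache attribute in bits 3-5 (0x10 = uncached,
// 0x18 = cacheable).  page->frame holds a kseg0 address, so the 0x80000000
// bit is stripped before shifting.  As a worked example (hypothetical frame),
// a cacheable, writable frame at 0x80400000 would give
// (0x00400000 >> 6) | 0x18 | 0x2 | 0x4 = 0x1001e.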
static unsigned make_entry_lo (kPage *page):
	//kdebug_line ()
	if !page->frame:
		//kdebug ("not mapping because there is no frame\n")
		return 0
	unsigned flags
	if page->flags & Iris::Page::UNCACHED:
		flags = 0x10 | 0x2
	else:
		// 18 is write-back cache; 00 is write-through cache.
		flags = 0x18 | 0x2
	if ~page->flags & Iris::Page::MAPPED_READONLY:
		flags |= 0x4
	return ((page->frame & ~0x80000000) >> 6) | flags
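// Map a page into an address space.  The structure used here is a two-level
// table: arch.directory holds one Table pointer per 2 MiB region
// (address >> 21), each Table holds 512 EntryLo values and kPage pointers
// indexed by bits 20-12 of the address, and arch.shadow keeps, per 2 MiB
// region, a doubly linked list of the kPages mapped there (via
// arch.prev_mapped/next_mapped).  For a hypothetical address 0x00403000 the
// directory index is 0x403000 >> 21 = 2 and the table index is
// (0x403000 >> 12) & 0x1ff = 3.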
bool kMemory_arch_map (kMemory *mem, kPage *page, unsigned address):
	if page->mapping != ~0:
		mem->unmap (page)
	if address == ~0:
		return true
	if address >= 0x80000000:
		dpanic (address, "trying to map to kernel address")
		return false
	if address & ~PAGE_MASK:
		dpanic (address, "mapping not page-aligned")
		address &= PAGE_MASK
	if !mem->arch.directory:
		//kdebug ("creating directory\n")
		mem->arch.directory = (Table **)mem->zalloc ()
		if !mem->arch.directory:
			dpanic (0, "unable to allocate directory")
			return false
		mem->arch.shadow = (kPageP *)mem->zalloc ()
		if !mem->arch.shadow:
			dpanic (0, "unable to allocate shadow directory")
			mem->zfree ((unsigned)mem->arch.directory)
			mem->arch.directory = NULL
			return false
	Table *table = mem->arch.directory[address >> 21]
	if !table:
		//kdebug ("creating table\n")
		table = (Table *)mem->zalloc ()
		if !table:
			dpanic (0, "unable to allocate table")
			//if mem->arch.first_table == ~0:
			//	mem->zfree ((unsigned)mem->arch.directory)
			//	mem->zfree ((unsigned)mem->arch.shadow)
			//	mem->arch.directory = NULL
			//	mem->arch.shadow = NULL
			return false
		mem->arch.directory[address >> 21] = table
	unsigned idx = (address >> 12) & ((1 << 9) - 1)
	if table->entrylo[idx]:
		kdebug ("page already mapped: ")
		kdebug_num (idx, 3)
		kdebug (";")
		kdebug_num (table->entrylo[idx])
		kdebug ("/")
		kdebug_num ((unsigned)table->page[idx])
		kdebug (" table: ")
		kdebug_num ((unsigned)table)
		kdebug ("\n")
		mem->unmap (table->page[idx])
	table->entrylo[idx] = make_entry_lo (page)
	table->page[idx] = page
	#if 0
	kdebug ("mapped ")
	kdebug_num (page->frame)
	kdebug (" at address ")
	kdebug_num (address)
	kdebug ('\n')
	#endif
	page->mapping = address
	page->arch.next_mapped = mem->arch.shadow[address >> 21]
	if page->arch.next_mapped:
		page->arch.next_mapped->arch.prev_mapped = page
	mem->arch.shadow[address >> 21] = page
	tlb_reset (mem, address, table->entrylo[idx])
	return true
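// Undo a mapping: clear the table entry, take the page out of the shadow
// list, and drop any TLB entry that may still hold the old translation.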
void kMemory_arch_unmap (kMemory *mem, kPage *page):
	//kdebug_line ()
	unsigned didx = page->mapping >> 21
	unsigned tidx = (page->mapping >> 12) & ((1 << 9) - 1)
	Table *table = mem->arch.directory[didx]
	table->entrylo[tidx] = 0
	table->page[tidx] = NULL
	if page->arch.next_mapped:
		page->arch.next_mapped->arch.prev_mapped = page->arch.prev_mapped
	if page->arch.prev_mapped:
		page->arch.prev_mapped->arch.next_mapped = page->arch.next_mapped
	else:
		mem->arch.shadow[didx] = page->arch.next_mapped
	page->arch.prev_mapped = NULL
	page->arch.next_mapped = NULL
	tlb_reset (mem, page->mapping, 0)
	page->mapping = ~0
kPage *kMemory_arch_get_mapping (kMemory *mem, unsigned address):
	//kdebug_line ()
	if address >= 0x80000000 || !mem->arch.directory:
		return NULL
	Table *table = mem->arch.directory[address >> 21]
	if !table:
		return NULL
	return table->page[(address >> 12) & ((1 << 9) - 1)]
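// Presumably called when a mapped page's frame or flags change: recompute the
// EntryLo value and push it into both the page table and the TLB.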
void kPage_arch_update_mapping (kPage *page):
	//kdebug_line ()
	if page->mapping == ~0:
		return
	unsigned target = make_entry_lo (page)
	unsigned de = page->mapping >> 21
	unsigned te = (page->mapping >> 12) & ((1 << 9) - 1)
	page->address_space->arch.directory[de]->entrylo[te] = target
	tlb_reset (page->address_space, page->mapping, target)
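// Flush one page out of the caches, one 32-byte line (8 words) at a time.
// Each iteration loads the line's address into k0 and issues two cache ops on
// it; going by the standard MIPS32 cache-op encoding, 0x10 and 0x11 appear to
// be Hit Invalidate for the instruction and data cache respectively.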
typedef unsigned cacheline[8]
void arch_uncache_page (unsigned page):
	for cacheline *line = (cacheline *)page; line < (cacheline *)(page + PAGE_SIZE); ++line:
		__asm__ volatile ("lw $k0, %0; cache 0x10, 0($k0); cache 0x11, 0($k0)" :: "m"(line))
void arch_register_interrupt (unsigned num, kReceiver *r):
	arch_interrupt_receiver[num] = r
	// And enable or disable the interrupt.
	if r:
		//if num != 0x18:
		//	kdebug ("enabled interrupt ")
		//	kdebug_num (num)
		//	kdebug (", state: ")
		//	kdebug_num (INTC_ISR)
		//	kdebug ("\n")
		intc_unmask_irq (num)
	else:
		//kdebug ("disabled interrupt ")
		//kdebug_num (num)
		//kdebug ("\n")
		intc_mask_irq (num)