#pypp 0
// Iris: micro-kernel for a capability-based operating system.
// mips/arch.ccp: Most mips-specific parts.
// Copyright 2009 Bas Wijnen
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

#define ARCH
#include "kernel.hh"

// Clear the mips-specific register save area of a fresh thread.
// Zeroes at, v0-v1, k0-k1, a0-a3, t0-t9 (indices 0-9 of arch.t), gp, fp,
// ra and the hi/lo multiply result registers.
// NOTE(review): the callee-saved s0-s7 and sp are not touched here —
// presumably they are set up elsewhere (sp lives in the generic kThread);
// confirm against the thread creation path.
void kThread_arch_init (kThread *thread):
	thread->arch.at = 0
	for unsigned i = 0; i < 2; ++i:
		thread->arch.v[i] = 0
		thread->arch.k[i] = 0
	for unsigned i = 0; i < 4; ++i:
		thread->arch.a[i] = 0
	for unsigned i = 0; i < 10; ++i:
		thread->arch.t[i] = 0
	thread->arch.gp = 0
	thread->arch.fp = 0
	thread->arch.ra = 0
	thread->arch.hi = 0
	thread->arch.lo = 0

// Deliver an ipc payload into a receiving thread's registers:
// the two 64-bit data words are split over a0-a3 (low word first) and the
// capability's protected data goes into t0/t1.
void kThread_arch_receive (kThread *thread, Kernel::Num protected_data, Kernel::Num *data):
	thread->arch.a[0] = data[0].l
	thread->arch.a[1] = data[0].h
	thread->arch.a[2] = data[1].l
	thread->arch.a[3] = data[1].h
	thread->arch.t[0] = protected_data.l
	thread->arch.t[1] = protected_data.h

// Return a pointer to the saved slot for mips register number num (1-31),
// using the standard o32 numbering (1=at, 2-3=v0/v1, 4-7=a0-a3, 8-15=t0-t7,
// 16-23=s0-s7, 24-25=t8/t9, 26-27=k0/k1, 28=gp, 29=sp, 30=fp, 31=ra).
// Register 0 ($zero) has no storage; anything out of range returns NULL.
// Note that sp (29) is kept in the generic kThread, not in the arch part.
unsigned *kThread_arch_info (kThread *thread, unsigned num):
	switch num:
		case 1:
			return &thread->arch.at
		case 2:
			return &thread->arch.v[0]
		case 3:
			return &thread->arch.v[1]
		case 4:
			return &thread->arch.a[0]
		case 5:
			return &thread->arch.a[1]
		case 6:
			return &thread->arch.a[2]
		case 7:
			return &thread->arch.a[3]
		case 8:
			return &thread->arch.t[0]
		case 9:
			return &thread->arch.t[1]
		case 10:
			return &thread->arch.t[2]
		case 11:
			return &thread->arch.t[3]
		case 12:
			return &thread->arch.t[4]
		case 13:
			return &thread->arch.t[5]
		case 14:
			return &thread->arch.t[6]
		case 15:
			return &thread->arch.t[7]
		case 16:
			return &thread->arch.s[0]
		case 17:
			return &thread->arch.s[1]
		case 18:
			return &thread->arch.s[2]
		case 19:
			return &thread->arch.s[3]
		case 20:
			return &thread->arch.s[4]
		case 21:
			return &thread->arch.s[5]
		case 22:
			return &thread->arch.s[6]
		case 23:
			return &thread->arch.s[7]
		case 24:
			return &thread->arch.t[8]
		case 25:
			return &thread->arch.t[9]
		case 26:
			return &thread->arch.k[0]
		case 27:
			return &thread->arch.k[1]
		case 28:
			return &thread->arch.gp
		case 29:
			return &thread->sp
		case 30:
			return &thread->arch.fp
		case 31:
			return &thread->arch.ra
		default:
			return NULL

// Set up the arch part of a new address space: no page directory yet (it is
// allocated lazily on the first map) and asid 1.
// NOTE(review): every new space starts with asid 1; presumably a real asid
// is assigned from the asids[] pool when the space is activated — confirm.
void kMemory_arch_init (kMemory *mem):
	mem->arch.asid = 1
	mem->arch.directory = NULL
	mem->arch.shadow = NULL

// Tear down the arch part of an address space.
void kMemory_arch_free (kMemory *mem):
	// Unmap every remaining page; removing the last page of a page table
	// frees the table as well, so this drains first_page_table to NULL.
	while mem->arch.first_page_table:
		mem->unmap (mem->arch.first_page_table->first_page->page, mem->arch.first_page_table->first_page->mapping)
	// asids[] doubles as owner table and free list (slot 0 is the free-list
	// head): if this space still owns its asid, flush its tlb entries and
	// put the asid back on the free list.
	if (kMemory *)asids[mem->arch.asid] == mem:
		flush_tlb (mem->arch.asid)
		asids[mem->arch.asid] = asids[0]
		asids[0] = mem->arch.asid
	mem->unuse ()
	// NOTE(review): after the drain loop the directory has normally already
	// been freed (and NULLed) by free_page_table; presumably zfree accepts
	// a NULL/0 argument — confirm.
	mem->zfree ((unsigned)mem->arch.directory)

// Allocate the shadow bookkeeping record for one second-level page table and
// link it into mem's first_page_table list. Returns NULL on out-of-memory.
static arch_page_table *alloc_page_table (kMemory *mem):
	arch_page_table *ret = (arch_page_table *)mem->search_free (sizeof (arch_page_table), (void **)&mem->arch.first_page_table)
	if !ret:
		return NULL
	ret->first_page = NULL
	return ret

// Allocate the bookkeeping record for one mapped page and link it into page
// table t's first_page list. mapping == ~0 marks "not mapped yet".
// Returns NULL on out-of-memory.
static arch_page *alloc_page (kMemory *mem, arch_page_table *t):
	arch_page *ret = (arch_page *)mem->search_free (sizeof (arch_page), (void **)&t->first_page)
	if !ret:
		return NULL
	ret->page = NULL
	ret->mapping = ~0
	ret->prev_mapped = NULL
	ret->next_mapped = NULL
	return ret

// Free the (now empty) second-level page table in directory slot idx: release
// the table page, clear the directory and shadow slots, and drop the shadow
// record. When the last page table of the space goes away, the first-level
// directory and shadow arrays are freed too.
static void free_page_table (arch_page_table *t, unsigned idx):
	kMemory *mem = t->address_space
	mem->zfree ((unsigned)mem->arch.directory[idx])
	mem->arch.directory[idx] = NULL
	mem->arch.shadow[idx] = NULL
	mem->free_obj (t, (void **)&mem->arch.first_page_table)
	if !mem->arch.first_page_table:
		mem->zfree ((unsigned)mem->arch.directory)
		mem->zfree ((unsigned)mem->arch.shadow)
		mem->arch.directory = NULL
		mem->arch.shadow = NULL
// Update or invalidate the cached tlb entry for one virtual page, if present.
// address: page-aligned virtual address (bit 0 must be clear — callers must
// strip the read-only flag); asid: address space id of the mapping;
// value: new EntryLo value, 0 to invalidate.
static void tlb_reset (unsigned address, unsigned asid, unsigned value):
	cp0_set (CP0_ENTRY_HI, address | asid)
	// Probe the tlb for a matching entry; tlbp sets bit 31 of Index on miss.
	__asm__ volatile ("tlbp")
	unsigned idx
	cp0_get (CP0_INDEX, idx)
	if ~idx & 0x80000000:
		// Hit: each tlb entry maps an even/odd page pair, so rewrite the
		// EntryLo register matching this page's parity, then write back
		// the indexed entry.
		if address & (1 << PAGE_BITS):
			cp0_set (CP0_ENTRY_LO1, value)
		else:
			cp0_set (CP0_ENTRY_LO0, value)
		__asm__ volatile ("tlbwi")

// Remove one mapping record: unlink it from its kPage's mapped list, flush
// the tlb entry, and release it. Frees the whole page table when this was
// its last page.
static void free_page (arch_page_table *t, arch_page *p):
	if p->prev_mapped:
		p->prev_mapped->next_mapped = p->next_mapped
	else:
		p->page->arch.first_mapped = p->next_mapped
	if p->next_mapped:
		p->next_mapped->prev_mapped = p->prev_mapped
	// Bit 0 of p->mapping holds the read-only flag (see kMemory_arch_map);
	// mask it off so it cannot corrupt the EntryHi asid field during the
	// probe. Previously the raw mapping was used, so the flush silently
	// missed read-only mappings, leaving a stale tlb entry behind.
	tlb_reset (p->mapping & ~1, p->address_space->arch.asid, 0)
	unsigned idx = p->mapping >> 21
	p->address_space->free_obj (p, (void **)&t->first_page)
	if !t->first_page:
		free_page_table (t, idx)

// Build the EntryLo word for a page: pfn from the frame (bit 31 stripped to
// turn the kernel-segment address into a physical one), cache attribute from
// the UNCACHED flag, valid bit 0x2 always, dirty (write-enable) bit 0x4 only
// for writable mappings. An unbacked page gets an all-zero (invalid) entry.
static unsigned make_entry_lo (kPage *page, bool readonly):
	if !page->frame:
		return 0
	unsigned flags
	if page->flags & Kernel::Page::UNCACHED:
		flags = 0x10 | 0x2
	else:
		flags = 0x18 | 0x2
	if !readonly:
		flags |= 0x4
	return ((page->frame & ~0x80000000) >> 6) | flags

// Map page at address in mem. The read-only flag is encoded in bit 0 of the
// recorded mapping (the address itself is page-aligned, so bit 0 is free).
// The first-level directory, its shadow and second-level tables are created
// lazily; on allocation failure everything that is no longer needed is
// released again. Returns false on failure.
bool kMemory_arch_map (kMemory *mem, kPage *page, unsigned address, bool readonly):
	if address >= 0x80000000:
		panic (0x32134293, "trying to map to kernel address")
		return false
	address &= PAGE_MASK
	if !mem->arch.directory:
		mem->arch.directory = (unsigned **)mem->zalloc ()
		if !mem->arch.directory:
			return false
		mem->arch.shadow = (arch_page_table **)mem->zalloc ()
		if !mem->arch.shadow:
			mem->zfree ((unsigned)mem->arch.directory)
			mem->arch.directory = NULL
			return false
	unsigned *table = mem->arch.directory[address >> 21]
	arch_page_table *t = mem->arch.shadow[address >> 21]
	if !table:
		table = (unsigned *)mem->zalloc ()
		if !table:
			// Undo the lazy directory allocation if nothing else uses it.
			if !mem->arch.first_page_table:
				mem->zfree ((unsigned)mem->arch.directory)
				mem->zfree ((unsigned)mem->arch.shadow)
				mem->arch.directory = NULL
				mem->arch.shadow = NULL
			return false
		t = alloc_page_table (mem)
		if !t:
			mem->zfree ((unsigned)table)
			if !mem->arch.first_page_table:
				mem->zfree ((unsigned)mem->arch.directory)
				mem->zfree ((unsigned)mem->arch.shadow)
				mem->arch.directory = NULL
				mem->arch.shadow = NULL
			return false
		mem->arch.directory[address >> 21] = table
		mem->arch.shadow[address >> 21] = t
	arch_page *p = alloc_page (mem, t)
	if !p:
		if !t->first_page:
			// This automatically cleans up the rest.
			free_page_table (t, address >> 21)
		return false
	// The table page holds 512 EntryLo words, then 512 arch_page pointers.
	unsigned idx = (address >> 12) & ((1 << 9) - 1)
	if table[idx]:
		// Replacing an existing mapping: remove the old one first.
		// NOTE(review): table[idx + 0x200] actually holds an arch_page *,
		// not a kPage *; kMemory_arch_unmap ignores its page argument so
		// this works locally, but confirm the generic unmap never
		// dereferences this argument as a kPage.
		mem->unmap ((kPage *)table[idx + 0x200], address)
	table[idx] = make_entry_lo (page, readonly)
	table[idx + 0x200] = (unsigned)p
	// Record the mapping with the read-only flag in bit 0.
	p->mapping = address + readonly
	p->page = page
	p->next_mapped = page->arch.first_mapped
	if p->next_mapped:
		p->next_mapped->prev_mapped = p
	page->arch.first_mapped = p
	return true

// Remove the mapping at address from mem. The page argument is not used;
// the mapping record is found through the page tables. Unmapping an address
// that is not mapped is a no-op (previously this dereferenced NULL).
void kMemory_arch_unmap (kMemory *mem, kPage *page, unsigned address):
	unsigned didx = address >> 21
	unsigned tidx = (address >> 12) & ((1 << 9) - 1)
	if !mem->arch.directory:
		return
	unsigned *table = mem->arch.directory[didx]
	arch_page_table *t = mem->arch.shadow[didx]
	if !table || !table[tidx + 0x200]:
		return
	table[tidx] = 0
	arch_page *p = (arch_page *)table[tidx + 0x200]
	table[tidx + 0x200] = 0
	free_page (t, p)

// Return the kPage mapped at address, or NULL if nothing is mapped there
// (including kernel addresses). If readonly is non-NULL it is set to whether
// the mapping lacks the dirty (write-enable, 0x4) bit. Previously a lookup
// of an unmapped address dereferenced NULL; now all levels are checked.
kPage *kMemory_arch_get_mapping (kMemory *mem, unsigned address, bool *readonly):
	if address >= 0x80000000:
		return NULL
	if !mem->arch.directory:
		return NULL
	unsigned *table = mem->arch.directory[address >> 21]
	if !table:
		return NULL
	unsigned idx = (address >> 12) & ((1 << 9) - 1)
	arch_page *page = (arch_page *)table[idx + 0x200]
	if !page:
		return NULL
	if readonly:
		*readonly = !(table[idx] & 4)
	return page->page

// Propagate a change of page->frame or page->flags to every address where
// the page is mapped: rebuild the page table entries and refresh any cached
// tlb entries.
void kPage_arch_update_mapping (kPage *page):
	if !page->arch.first_mapped:
		return
	kMemory *as = page->address_space
	unsigned target = make_entry_lo (page, page->flags & Kernel::Page::READONLY)
	for arch_page *p = page->arch.first_mapped; p; p = p->next_mapped:
		unsigned de = p->mapping >> 21
		unsigned te = (p->mapping >> 12) & ((1 << 9) - 1)
		// Bit 0 of the mapping records that this particular mapping is
		// read-only, independent of the page-wide READONLY flag.
		bool readonly = p->mapping & 1
		unsigned t
		if readonly:
			t = target & ~0x4
		else:
			t = target
		as->arch.directory[de][te] = t
		tlb_reset (p->mapping & ~1, as->arch.asid, t)

// Attach receiver r to interrupt num (NULL detaches), and enable or disable
// the interrupt in the interrupt controller accordingly.
void arch_register_interrupt (unsigned num, kReceiver *r):
	arch_interrupt_receiver[num] = r
	// And enable or disable the interrupt.
	if r:
		intc_unmask_irq (num)
	else:
		intc_mask_irq (num)