mirror of git://projects.qi-hardware.com/iris.git
change mapping system; implement fast tlb refill
parent a65e959a8a
commit e13dc897fd
entry.S (35 changes)
@@ -2,7 +2,9 @@
.lcomm kernel_stack, KERNEL_STACK_SIZE

.globl run_idle
.globl directory
.set noat
.set noreorder

#define ARCH
#define ASM
@@ -10,7 +12,34 @@

addr_000:
	// TLB refill
	// TODO: this should probably be assembly-only for speed reasons
	bne $zero, $k0, slow_refill
	nop
	bne $zero, $k1, slow_refill
	nop
	lw $k1, -0xd94($zero)
	beq $zero, $k1, slow_refill
	mfc0 $k0, $CP0_ENTRY_HI
	srl $k0, $k0, 19
	and $k0, $k0, 0x3fc
	addu $k0, $k0, $k1
	beq $zero, $k0, slow_refill0
	lw $k0, 0($k0)
	mfc0 $k1, $CP0_ENTRY_HI
	srl $k1, $k1, 10
	and $k1, $k1, 0x1f8
	add $k0, $k0, $k1
	lw $k1, 0($k0)
	mtc0 $k1, $CP0_ENTRY_LO0
	lw $k1, 4($k0)
	mtc0 $k1, $CP0_ENTRY_LO1
	tlbwr
	move $zero, $k0
	move $zero, $k1
	eret

slow_refill0:
	move $k1, $zero
slow_refill:
	sw $ra, -0xd88($zero)
	bal save_regs
	la $t9, tlb_refill
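Ignoring branch-delay-slot details, the fast path above only runs when both k0 and k1 are zero: it loads the global page-directory pointer from the fixed word at -0xd94, indexes it with bits of EntryHi, pulls an EntryLo0/EntryLo1 pair out of the selected mapping table, writes them with tlbwr and returns with eret without touching any other register. A hedged C++ rendering of that logic; the cp0_* helper names and the fast_refill wrapper are illustrative, not code from the tree, and the NULL check on the table pointer reflects the apparent intent of the beq into slow_refill0.

/* Hedged sketch of the fast-path logic; cp0_* accessors are assumed names. */
extern unsigned **directory;              /* the word stored at -0xd94($zero) */
extern unsigned cp0_read_entry_hi (void);
extern void cp0_write_entry_lo0 (unsigned);
extern void cp0_write_entry_lo1 (unsigned);
extern void cp0_tlbwr (void);

int fast_refill (void)
{
	if (!directory)
		return 0;                         /* no directory yet: take the slow path */
	unsigned hi = cp0_read_entry_hi ();
	/* (hi >> 19) & 0x3fc is already a byte offset into the directory. */
	unsigned *table = *(unsigned **)((char *)directory + ((hi >> 19) & 0x3fc));
	if (!table)
		return 0;                         /* no mapping table: slow path */
	/* (hi >> 10) & 0x1f8 selects an EntryLo0/EntryLo1 pair in the table. */
	unsigned *pair = (unsigned *)((char *)table + ((hi >> 10) & 0x1f8));
	cp0_write_entry_lo0 (pair[0]);
	cp0_write_entry_lo1 (pair[1]);
	cp0_tlbwr ();                         /* write a random TLB slot */
	return 1;
}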
@@ -42,7 +71,9 @@ addr_200:
	nop
	.fill 0x280 - (. - addr_000) - 16

// space for save_regs: k0; current Thread; ra; gp
directory:
	.word 0 // -d94 == directory
// space for save_regs
	.word 0 // -d90 == k0
	.word idle // -d8c == current
	.word 0 // -d88 == ra
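The .word slots above sit at fixed small offsets so the refill and save_regs paths can reach them with a single lw/sw relative to $zero; the 16-bit offset is sign-extended, giving fixed kernel addresses near the top of the address space where this data presumably ends up linked. A hedged C++ view of that layout; the macro names are mine, the offsets are the ones in the comments.

/* Hedged view of the fixed scratch words defined above; names are assumed. */
#define SCRATCH(ofs)   (*(volatile unsigned *)(ofs))
#define DIRECTORY_WORD SCRATCH (-0xd94)   /* page directory of the running space */
#define SAVED_K0       SCRATCH (-0xd90)   /* k0 spill slot for save_regs         */
#define CURRENT_THREAD SCRATCH (-0xd8c)   /* current Thread pointer              */
#define SAVED_RA       SCRATCH (-0xd88)   /* ra spill slot                       */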
init.ccp (1 change)
@@ -32,6 +32,7 @@ static void init_idle ():
	idle_page.refs = NULL
	idle_page.address_space = NULL
	current = &idle
	directory = idle_memory.arch.directory

static void init_cp0 ():
	// Set timer to a defined value
@@ -2,27 +2,22 @@
#define ARCH
#include "kernel.hh"

/// A TLB miss has occurred. This should eventually move to entry.S.
/// A TLB miss has occurred. This is the slow version. It is only used
/// when k0 or k1 is not 0, or when an error occurs.
/// Otherwise, the ultra-fast code in entry.S is used.
Thread *tlb_refill ():
	//panic (0x88776655, "TLB refill")
	if !directory:
		panic (0x44449999, "No directory")
	unsigned EntryHi
	cp0_get (CP0_ENTRY_HI, EntryHi)
	bool write0 = false, write1 = false
	Page *page0 = current->address_space->get_mapping (EntryHi & ~(1 << 12), &write0)
	Page *page1 = current->address_space->get_mapping (EntryHi | (1 << 12), &write1)
	if (!(EntryHi & (1 << 12)) && !page0) || ((EntryHi & (1 << 12)) && !page1):
		panic (0x22222222, "no page mapped at requested address")
	unsigned low0, low1
	if page0:
		low0 = ((page0->physical & ~0x80000fff) >> 6) | 0x18 | (write0 ? 0x4 : 0) | 0x2
	else
		low0 = 0
	if page1:
		low1 = ((page1->physical & ~0x80000fff) >> 6) | 0x18 | (write1 ? 0x4 : 0) | 0x2
	else
		low1 = 0
	cp0_set (CP0_ENTRY_LO0, low0)
	cp0_set (CP0_ENTRY_LO1, low1)
	unsigned *t = directory[EntryHi >> 21]
	if !t:
		panic (0x99992222, "No page table")
	// - 2 instead of - 1 means reset bit 0
	unsigned idx = (EntryHi >> 12) & ((1 << 9) - 2)
	cp0_set (CP0_ENTRY_LO0, t[idx])
	cp0_set (CP0_ENTRY_LO1, t[idx + 1])
	__asm__ volatile ("tlbwr")
	return current
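Both the removed code and the new table-driven code build EntryLo words the same way: the physical frame number shifted into the PFN field plus the 0x18, 0x4 and 0x2 flag bits. A sketch of that encoding, which follows the standard MIPS32 EntryLo layout; the helper name is mine.

/* EntryLo layout: PFN in bits 6 and up, C (cache attribute) in bits 3-5,
 * D (dirty, i.e. writable) in bit 2, V (valid) in bit 1. */
static unsigned make_entry_lo (unsigned physical, int writable)
{
	return ((physical >> 12) << 6)    /* physical frame number       */
	     | 0x18                       /* C = 3: cached, noncoherent  */
	     | (writable ? 0x4 : 0)       /* D: stores allowed           */
	     | 0x2;                       /* V: entry is valid           */
}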
@@ -20,8 +20,6 @@ struct Capability
struct Cappage
struct Memory

#include "arch.hh"

struct Object_base:
	Capability *refs
	Memory *address_space
@@ -40,8 +38,11 @@ struct Free : public Object <Free>:
bool Object_base::is_free ():
	return ((Free *)this)->marker == ~0

#include "arch.hh"

struct Page : public Object <Page>:
	unsigned physical
	Page_arch arch

struct Thread : public Object <Thread>:
	Receiver *receivers
mips.ccp (129 changes)
@@ -45,6 +45,7 @@ void Thread_arch_receive_fail (Thread *thread):
void Memory_arch_init (Memory *mem):
	mem->arch.asid = 1
	mem->arch.directory = NULL
	mem->arch.shadow = NULL

static void flush_tlb (unsigned asid):
	for unsigned tlb = 1; tlb < 32; ++tlb:
@@ -58,20 +59,8 @@ static void flush_tlb (unsigned asid):
		__asm__ volatile ("tlbwi")

void Memory_arch_free (Memory *mem):
	if !mem->arch.directory:
		return
	for unsigned i = 0; i < PAGE_SIZE; ++i:
		unsigned *table = mem->arch.directory[i]
		if !table:
			continue
		for unsigned j = 0; j < PAGE_SIZE; ++j:
			Page *page = (Page *)(table[j] & ~3)
			if !page:
				continue
			mem->unmap (page, i * 0x1000 * 0x400 + j * 0x1000)
			mem->unuse ()
		mem->zfree ((unsigned)table)
		mem->arch.directory[i] = NULL
	while mem->arch.first_page_table:
		mem->unmap (mem->arch.first_page_table->first_page->page, mem->arch.first_page_table->first_page->mapping)
	if (Memory *)asids[mem->arch.asid] == mem:
		flush_tlb (mem->arch.asid)
		asids[mem->arch.asid] = asids[0]
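The new teardown loop above no longer sweeps every directory slot; it keeps unmapping the head mapping of the head page table and relies on free_page and free_page_table (added in the next hunk) to unlink and release the bookkeeping as the lists drain. A minimal sketch of that pattern, assuming the declarations from mips.hhp; the helper name is mine.

/* Sketch only: unmapping the head mapping each time shrinks first_page, and
 * free_page_table drops the table (and eventually the directory itself)
 * once its last page is gone. */
static void drop_all_mappings (Memory *mem)
{
	while (mem->arch.first_page_table)
	{
		arch_page *head = mem->arch.first_page_table->first_page;
		mem->unmap (head->page, head->mapping);
	}
}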
@@ -79,33 +68,122 @@ void Memory_arch_free (Memory *mem):
	mem->unuse ()
	mem->zfree ((unsigned)mem->arch.directory)

static arch_page_table *alloc_page_table (Memory *mem):
	arch_page_table *ret = (arch_page_table *)mem->search_free (sizeof (arch_page_table), (void **)&mem->arch.first_page_table)
	if !ret:
		return NULL
	ret->first_page = NULL
	return ret

static arch_page *alloc_page (Memory *mem, arch_page_table *t):
	arch_page *ret = (arch_page *)mem->search_free (sizeof (arch_page), (void **)&t->first_page)
	if !ret:
		return NULL
	ret->page = NULL
	ret->mapping = ~0
	ret->prev_mapped = NULL
	ret->next_mapped = NULL
	return ret

static void free_page_table (arch_page_table *t, unsigned idx):
	Memory *mem = t->address_space
	if t->next:
		t->next->prev = t->prev
	if t->prev:
		t->prev->next = t->next
	else:
		mem->arch.first_page_table = t->next
	mem->zfree ((unsigned)mem->arch.directory[idx])
	mem->arch.directory[idx] = NULL
	mem->arch.shadow[idx] = NULL
	mem->free_obj (t)
	if !mem->arch.first_page_table:
		mem->zfree ((unsigned)mem->arch.directory)
		mem->zfree ((unsigned)mem->arch.shadow)
		mem->arch.directory = NULL
		mem->arch.shadow = NULL

static void free_page (arch_page_table *t, arch_page *p):
	if p->next:
		p->next->prev = p->prev
	if p->prev:
		p->prev->next = p->next
	else:
		t->first_page = p->next
	if p->prev_mapped:
		p->prev_mapped->next_mapped = p->next_mapped
	else:
		p->page->arch.first_mapped = p->next_mapped
	if p->next_mapped:
		p->next_mapped->prev_mapped = p->prev_mapped
	unsigned idx = p->mapping >> 21
	p->address_space->free_obj (p)
	if !t->first_page:
		free_page_table (t, idx)

bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write):
	if !mem->arch.directory:
		mem->arch.directory = (unsigned **)mem->zalloc ()
		if !mem->arch.directory:
			return false
	unsigned *table = mem->arch.directory[address >> 22]
		mem->arch.shadow = (arch_page_table **)mem->zalloc ()
		if !mem->arch.shadow:
			mem->zfree ((unsigned)mem->arch.directory)
			mem->arch.directory = NULL
			return false
	unsigned *table = mem->arch.directory[address >> 21]
	arch_page_table *t = mem->arch.shadow[address >> 21]
	if !table:
		table = (unsigned *)mem->zalloc ()
		if !table:
			if !mem->arch.first_page_table:
				mem->zfree ((unsigned)mem->arch.directory)
				mem->zfree ((unsigned)mem->arch.shadow)
			return false
		mem->arch.directory[address >> 22] = table
	unsigned idx = (address >> 12) & ((1 << 10) - 1)
		t = alloc_page_table (mem)
		if !t:
			mem->zfree ((unsigned)table)
			if !mem->arch.first_page_table:
				mem->zfree ((unsigned)mem->arch.directory)
				mem->zfree ((unsigned)mem->arch.shadow)
			return false
		mem->arch.directory[address >> 21] = table
	arch_page *p = alloc_page (mem, t)
	if !p:
		if !t->first_page:
			// This automatically cleans up the rest.
			free_page_table (t, address >> 21)
		return false
	unsigned idx = (address >> 12) & ((1 << 9) - 1)
	if table[idx]:
		mem->unmap ((Page *)(table[idx] & ~3), address)
	table[idx] = write ? (unsigned)page : (unsigned)page + 1
		mem->unmap ((Page *)table[idx + 0x200], address)
	table[idx] = page->physical ? (page->physical >> 6) | 0x18 | (write ? 0x4 : 0) | 0x2 : 0
	table[idx + 0x200] = (unsigned)p
	p->mapping = address
	p->page = page
	p->next_mapped = page->arch.first_mapped
	if p->next_mapped:
		p->next_mapped->prev_mapped = p
	page->arch.first_mapped = p
	return true

void Memory_arch_unmap (Memory *mem, Page *page, unsigned address):
	unsigned *table = mem->arch.directory[address >> 22]
	table[(address >> 12) & ((1 << 10) - 1)] = 0
	unsigned didx = address >> 21
	unsigned tidx = (address >> 12) & ((1 << 9) - 1)
	unsigned *table = mem->arch.directory[didx]
	arch_page_table *t = mem->arch.shadow[didx]
	table[tidx] = 0
	arch_page *p = (arch_page *)table[tidx + 0x200]
	table[tidx + 0x200] = 0
	free_page (t, p)

Page *Memory_arch_get_mapping (Memory *mem, unsigned address, bool *writable):
	unsigned *table = mem->arch.directory[address >> 22]
	unsigned v = table[(address >> 12) & ((1 << 10) - 1)]
	unsigned *table = mem->arch.directory[address >> 21]
	unsigned idx = (address >> 12) & ((1 << 9) - 1)
	arch_page *page = (arch_page *)table[idx + 0x200]
	if writable:
		*writable = !(v & 1)
	return (Page *)(v & ~1)
		*writable = (table[idx] & 4 ? 1 : 0)
	return page->page

void arch_invoke ():
	Capability *target, *c[4]
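The indexing above (table[idx] next to table[idx + 0x200]) reflects how each page-table page is split in half: the first 0x200 words are the hardware EntryLo values that the fast refill and tlb_refill feed to tlbwr, and the second 0x200 words are pointers to the arch_page bookkeeping records that Memory_arch_map fills in and free_page tears down. A hedged picture of that layout; the struct and field names are mine.

/* One 4 KiB page-table page as indexed with table[idx] and table[idx + 0x200]. */
struct arch_page;
struct mapping_page
{
	unsigned entry_lo[0x200];         /* hardware half: what tlbwr loads       */
	arch_page *bookkeeping[0x200];    /* kernel half: per-mapping arch_page *  */
};
/* 0x200 * 4 + 0x200 * 4 == 0x1000, exactly one page. */

Keeping the EntryLo words for an even/odd page pair adjacent in the first half is what lets the fast path in entry.S load EntryLo0 and EntryLo1 with two consecutive lw instructions.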
@@ -147,3 +225,4 @@ void arch_invoke ():
		random = 1
	asids[current->address_space->arch.asid] = (unsigned)current
	cp0_set (CP0_ENTRY_HI, current->address_space->arch.asid)
	directory = current->address_space->arch.directory
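The line added to arch_invoke keeps the fast refill usable across a switch: the ASID written into EntryHi tags new TLB entries with the owning address space, and directory is the root the refill handler reads from its fixed slot, so both must be current before returning to user code. A minimal sketch of that pairing, assuming the declarations from mips.hhp; the helper name is mine.

/* Minimal sketch; the updates mirror the lines in arch_invoke above. */
static void switch_to (Thread *next)
{
	Memory *space = next->address_space;
	asids[space->arch.asid] = (unsigned)next;   /* remember who owns the ASID    */
	cp0_set (CP0_ENTRY_HI, space->arch.asid);   /* tag future TLB entries        */
	directory = space->arch.directory;          /* root used by the refill path  */
}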
mips.hhp (25 changes)
@@ -92,9 +92,30 @@ struct Thread_arch:
	unsigned s0, s1, s2, s3, s4, s5, s6, s7
	unsigned gp, fp, ra, hi, lo, k0, k1

// The following is used for page mapping.
// Each Memory has two directories with 0x400 entries,
// page tables and mapping page tables. Mapping page tables are
// pages which contain 0x200 EntryLo values and 0x200 Page pointers.
// For a virtual address, bits 0-11 are in the physical address,
// bits 12-20 are an index in the page table, bits 21-30
// are an index in the page directory and bit 31 is always 0.

struct arch_page : public Object <arch_page> :
	Page *page
	unsigned mapping
	arch_page *prev_mapped, *next_mapped

struct arch_page_table : public Object <arch_page_table> :
	arch_page *first_page

struct Page_arch:
	arch_page *first_mapped

struct Memory_arch:
	unsigned asid
	unsigned **directory
	arch_page_table **shadow
	arch_page_table *first_page_table

// Pointers to Memory when asid is taken, index of next free, or 0, if free.
// asid[0] is used as index to first free asid.
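The bit split described in the comment above is the same one used by the index arithmetic in mips.ccp (address >> 21 for the directory, (address >> 12) & ((1 << 9) - 1) for the mapping table). A small self-contained sketch of the decomposition; the names are mine.

/* Sketch of the virtual-address split described above. */
struct va_split
{
	unsigned dir_idx;     /* bits 21-30: index into directory / shadow     */
	unsigned table_idx;   /* bits 12-20: index into the mapping page table */
	unsigned offset;      /* bits 0-11: offset inside the 4 KiB page       */
};

static va_split split_va (unsigned vaddr)
{
	va_split s;
	s.dir_idx = (vaddr >> 21) & 0x3ff;
	s.table_idx = (vaddr >> 12) & 0x1ff;
	s.offset = vaddr & 0xfff;
	return s;
}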
@@ -115,10 +136,12 @@ extern "C":
	void run_idle (Thread *self)
#endif

// These are "extern", not "EXTERN", because they really are defined elsewhere.
#ifdef INIT
// This is "extern", not "EXTERN", because it really is defined elsewhere.
extern unsigned thread_start[NUM_THREADS + 1]
#endif
// Fast pointer to page directory, for tlb miss events
extern unsigned **directory

#endif // defined ASM
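The asids array described by the comment in the previous hunk doubles as its own free list: a taken slot holds the owning Memory, a free slot holds the index of the next free slot, and slot 0 points at the first free one. A hedged sketch of allocation and release under that convention; the helper names and the 256-entry size (the MIPS ASID field is 8 bits) are assumptions, not code from the tree.

/* Hedged sketch of the asids[] free-list convention. */
struct Memory;
extern unsigned asids[256];

static unsigned alloc_asid (Memory *owner)
{
	unsigned a = asids[0];           /* index of the first free ASID, 0 if none */
	if (a)
	{
		asids[0] = asids[a];         /* unlink it from the free list */
		asids[a] = (unsigned)owner;  /* slot now records the owner   */
	}
	return a;
}

static void release_asid (unsigned a)
{
	asids[a] = asids[0];             /* put the slot back on the free list */
	asids[0] = a;
}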