mirror of git://projects.qi-hardware.com/iris.git
synced 2024-12-29 18:52:25 +02:00

commit f800bc51be
parent 12637f5695

    more
Makefile (4 lines changed)

@@ -1,12 +1,13 @@
 load = 0x80000000

-CXXFLAGS = -Wno-unused-parameter -fno-strict-aliasing -fno-builtin -nostdinc -DNUM_THREADS=0 -I/usr/include
+CXXFLAGS = -Wno-unused-parameter -fno-strict-aliasing -fno-builtin -nostdinc -DNUM_THREADS=2 -I/usr/include
+CPPFLAGS = -O5 -Wa,-mips32
 CROSS = mipsel-linux-gnu-
 CC = $(CROSS)gcc
 LD = $(CROSS)ld
 OBJCOPY = $(CROSS)objcopy
 OBJDUMP = $(CROSS)objdump
 STRIP = $(CROSS)strip

 kernel_sources = interrupts.cc panic.cc data.cc test.cc alloc.cc memory.cc arch.cc invoke.cc schedule.cc
 boot_sources = init.cc
@@ -37,6 +38,7 @@ entry.o: thread0 thread1

 %: boot-helper.o boot-programs/%.o
 	$(LD) $^ -o $@
+	$(STRIP) $@

 # entry.o must be the first file. boot.o must be the first of the init objects (which can be dumped after loading).
 all: entry.o $(subst .cc,.o,$(kernel_sources)) boot.o $(subst .cc,.o,$(boot_sources))
@@ -31,7 +31,7 @@ unsigned Memory::zalloc ():
 	FreePage *ret = zero_pages
 	if !ret:
 		ret = junk_pages
-		for unsigned i = 1; i < PAGE_SIZE; ++i:
+		for unsigned i = 1; i < (PAGE_SIZE >> 2); ++i:
 			((unsigned *)ret)[i] = 0
 		junk_pages = ret->next
 	else:
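The bound fix above is about units: the loop clears the page one 32-bit word at a time, so it must run PAGE_SIZE / 4 times, not PAGE_SIZE times. A minimal standalone sketch in plain C++ (not the project's .ccp dialect), assuming 4 KiB pages; clear_page_words is an illustrative name, not a kernel function:

// Illustrative only: the corrected loop bound for word-wise clearing.
// Assumes PAGE_SIZE = 4096 bytes, i.e. 1024 32-bit words per page.
static const unsigned PAGE_SIZE = 4096;

void clear_page_words (unsigned *page)
{
	// Start at 1 as the kernel does: word 0 still holds the free-list
	// "next" pointer, which is read right after the loop (ret->next).
	for (unsigned i = 1; i < (PAGE_SIZE >> 2); ++i)
		page[i] = 0;
}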
@@ -3,4 +3,4 @@

 int main ():
 	while true:
-		__asm__ volatile ("move $v0, $zero; li $a0, 1 ; move $a1, $a0 ; move $a2, $a0 ; syscall")
+		__asm__ volatile ("move $v0, $zero; li $a0, 1 ; move $a1, $zero ; move $a2, $a0 ; syscall")
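For context, this inline assembly follows the invocation convention visible elsewhere in the commit: arch_invoke () in mips.ccp reads $v0 as the capability index and currently forwards $a0-$a2 to led (). A hedged sketch of the same call as a plain C++ wrapper; invoke0 is a made-up name and the constraint details are an assumption, not the project's API:

// Illustrative wrapper for the syscall sequence in the hunk above.
static inline void invoke0 (unsigned a0, unsigned a1, unsigned a2)
{
	register unsigned v0 __asm__ ("$2") = 0;    // $v0: capability index (0)
	register unsigned r4 __asm__ ("$4") = a0;   // $a0
	register unsigned r5 __asm__ ("$5") = a1;   // $a1
	register unsigned r6 __asm__ ("$6") = a2;   // $a2
	__asm__ volatile ("syscall"
	                  : "+r" (v0)
	                  : "r" (r4), "r" (r5), "r" (r6)
	                  : "memory");
}

// The change above is then equivalent to going from invoke0 (1, 1, 1)
// to invoke0 (1, 0, 1): only the second argument changes from 1 to 0.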
boot.S (2 lines changed)

@@ -45,7 +45,7 @@ start_hack_for_disassembler:
 	jr $t9
 	nop

-tread_start:
+thread_start:
 	.word thread0
 	.word thread1
 	.word thread2
entry.S (32 lines changed)

@@ -8,6 +8,7 @@
 #define Random 1
 #define EntryLo0 2
 #define EntryLo1 3
 #define BadVAddr 8
 #define EntryHi 10
 #define Status 12
 #define EPC 14
@@ -51,13 +52,6 @@
 addr_000:
 	// TLB refill
 	// TODO: this should probably be assembly-only for speed reasons
-
-	//mfc0 $a0, $EPC
-	li $a0, 0x11992288
-	la $t9, panic
-	jr $t9
-	nop
-
 	sw $ra, -0xd88($zero)
 	bal save_regs
 	la $t9, tlb_refill
@@ -66,13 +60,6 @@ addr_000:
 	.fill 0x100 - (. - addr_000)
 addr_100:
 	// Cache error
-	// TODO
-
-	li $a0, 0xaaaa0000
-	la $t9, panic
-	jr $t9
-	nop
-
 	sw $ra, -0xd88($zero)
 	bal save_regs
 	la $t9, cache_error
@@ -89,13 +76,6 @@ addr_180:
 	.fill 0x200 - (. - addr_000)
 addr_200:
 	// Interrupt
-	// TODO
-
-	li $a0, 0x0a0f0000
-	la $t9, panic
-	jr $t9
-	nop
-
 	sw $ra, -0xd88($zero)
 	bal save_regs
 	la $t9, interrupt
@@ -117,6 +97,7 @@ start_idle: // 288
 	// idle task would need to own capabilities.
 	move $v0, $zero
 	syscall
 	nop
 1:	wait
 	b 1b
 	nop
@@ -152,7 +133,6 @@ kernel_exit:
 	lw $s5, SAVE_S5($v0)
 	lw $s6, SAVE_S6($v0)
 	lw $s7, SAVE_S7($v0)
-	lw $sp, SAVE_SP($v0)
 	lw $fp, SAVE_FP($v0)
 	lw $ra, SAVE_RA($v0)
 	lw $at, SAVE_AT($v0)
@@ -161,6 +141,8 @@ kernel_exit:
 	sw $k1, -0xd90($zero)
 	lw $k1, SAVE_K1($v0)
 	sw $v0, -0xd8c($zero)

+	lw $sp, SAVE_SP($v0)
+	lw $gp, SAVE_GP($v0)
 	lw $v0, -0xd90($zero)
 	eret
@@ -216,15 +198,15 @@ save_regs:
 	move $t9, $ra
 	la $ra, kernel_exit
 	jr $t9
 	move $a0, $k0
 	nop

 .globl thread0
 .globl thread1
 .globl thread2
 .balign 0x1000
 thread0:
 .balign 0x1000
 	.incbin "thread0"
 thread1:
 .balign 0x1000
 thread1:
 	.incbin "thread1"
 thread2:
init.ccp (32 lines changed)

@@ -31,7 +31,8 @@ static void init_idle ():
 	idle_page.next_obj = NULL
 	idle_page.prev = NULL
 	idle_page.next = NULL
-	idle_page.physical = 0
+	idle_page.physical = 0x80000000
 	current = &idle

 static void init_cp0 ():
 	// Set timer to a defined value
@@ -74,15 +75,16 @@ static void init_cp0 ():
 	__asm__ volatile ("tlbwr")
 	// Allow eret to be used to jump to the idle task.
 	cp0_set (CP0_EPC, 0x284a0288)
-	// Enable all interrupts and say we're handling an exception.
-	// Since we're going to enter the idle task, allow access to cp0.
-	cp0_set (CP0_STATUS, 0x1000ff13)
+	// Wait with initializing the status register until the last moment, so that
+	// exceptions in the bootup code will fill EPC and friends.

 static void init_threads ():
+	Thread *previous = NULL
+	first_scheduled = NULL
 	for unsigned i = 0; i < NUM_THREADS; ++i:
 		Memory *mem = top_memory.alloc_memory ()
 		Thread *thread = mem->alloc_thread ()
-		Page **pages = (Page **)mem->palloc ()
+		Page **pages = (Page **)mem->zalloc ()
 		Elf32_Ehdr *header = (Elf32_Ehdr *)thread_start[i]
 		for unsigned j = 0; j < SELFMAG; ++j:
 			if header->e_ident[j] != ELFMAG[j]:
@@ -95,8 +97,8 @@ static void init_threads ():
 			panic (i * 0x1000 + EI_VERSION, "invalid ELF version")
 		if header->e_type != ET_EXEC:
 			panic (i * 0x1000 + 0x10, "invalid ELF type")
-		if header->e_machine != EM_MIPS_RS3_LE:
-			panic (i * 0x1000 + 0x10, "invalid ELF machine")
+		if header->e_machine != EM_MIPS_RS3_LE && header->e_machine != EM_MIPS:
+			panic (i * 0x1000 + 0x12, "invalid ELF machine")
 		thread->pc = header->e_entry
 		thread->sp = 0x80000000
 		for unsigned section = 0; section < header->e_shnum; ++section:
@@ -130,8 +132,9 @@ static void init_threads ():
 					break
 				if a < shdr->sh_addr:
 					continue
-				*(unsigned *)a = 0
+				((unsigned *)page->physical)[(a & ~PAGE_MASK) >> 2] = 0
 		for unsigned p = 0; p <= ((thread_start[i + 1] - thread_start[i] - 1) >> PAGE_BITS); ++p:
+			// TODO: this also skips pages where new space is allocated.
 			if pages[p]:
 				continue
 			++top_memory.limit
@@ -146,10 +149,19 @@ static void init_threads ():
 		Capability *admin = mem->alloc_capability ((Receiver *)(CAPTYPE_ADMIN | ~PAGE_MASK), &mem->capabilities, ~0)
 		thread->arch.a3 = (unsigned)admin
+		mem->pfree ((unsigned)pages)
+		thread->schedule_next = NULL
+		thread->schedule_prev = previous
+		if previous:
+			previous->schedule_next = thread
+		else:
+			first_scheduled = thread
+		previous = thread

 // Initialize the kernel, finish by falling into the idle task.
 extern unsigned _end
 void init ():
 	// Disable interrupts and set interrupt vectors to normal.
 	cp0_set0 (CP0_STATUS)
 	// Initialize kernel variables to empty.
 	sleepers = NULL
 	runners = NULL
@@ -182,5 +194,9 @@ void init ():

 	init_threads ()

+	// Enable all interrupts and say we're handling an exception.
+	// Since we're going to enter the idle task, allow access to cp0.
+	cp0_set (CP0_STATUS, 0x1000ff13)
+
 	// Done; return to user space (the idle task).
 	__asm__ volatile ("eret")
@@ -3,8 +3,10 @@
 #include "kernel.hh"

 /// A TLB miss has occurred. This should eventually move to entry.S.
-Thread *tlb_refill (Thread *current, unsigned EntryHi):
-	panic (0x88776655, "TLB refill")
+Thread *tlb_refill ():
+	//panic (0x88776655, "TLB refill")
+	unsigned EntryHi
+	cp0_get (CP0_ENTRY_HI, EntryHi)
 	Page *page0 = current->address_space->get_mapping (EntryHi & ~(1 << 12))
 	Page *page1 = current->address_space->get_mapping (EntryHi | (1 << 12))
 	if (!(EntryHi & (1 << 12)) && !page0) || ((EntryHi & (1 << 12)) && !page1):
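The two get_mapping calls above exist because a MIPS TLB entry covers a pair of adjacent 4 KiB pages: EntryLo0 maps the even page (bit 12 clear) and EntryLo1 the odd page (bit 12 set). A small plain C++ sketch of that pairing; PagePair and tlb_pair are illustrative names, not kernel types:

// Illustrative: compute both halves of the TLB pair for a faulting address.
struct PagePair
{
	unsigned even;   // bit 12 clear -> goes into EntryLo0
	unsigned odd;    // bit 12 set   -> goes into EntryLo1
};

static PagePair tlb_pair (unsigned entry_hi)
{
	PagePair p;
	p.even = entry_hi & ~(1u << 12);
	p.odd = entry_hi | (1u << 12);
	return p;
}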
@@ -24,7 +26,8 @@ Thread *tlb_refill (Thread *current, unsigned EntryHi):
 	return current

 /// An interrupt which is not an exception has occurred.
-Thread *interrupt (Thread *current):
+Thread *interrupt ():
+	panic (0x88877722)
 	unsigned cause
 	cp0_get (CP0_CAUSE, cause)
 	for unsigned i = 0; i < 8; ++i:
@@ -32,17 +35,16 @@ Thread *interrupt (Thread *current):
 		// TODO: Handle interrupt.
 	// Disable all interrupts which are not handled.
 	unsigned status
-	__asm__ volatile ("mfc0 %0, $12" : "=r"(status))
-	__asm__ volatile ("mfc0 %0, $13" : "=r"(cause))
+	cp0_get (CP0_STATUS, status)
+	cp0_get (CP0_CAUSE, cause)
 	status &= ~(cause & 0x0000ff00)
-	__asm__ volatile ("mtc0 %0, $12" :: "r"(status))
+	cp0_set (CP0_STATUS, status)
 	return current

 /// A general exception has occurred.
-Thread *exception (Thread *current):
+Thread *exception ():
 	unsigned cause
-	led (true, true, true)
-	__asm__ volatile ("mfc0 %0, $13" : "=r"(cause))
+	cp0_get (CP0_CAUSE, cause)
 	switch (cause >> 2) & 0x1f:
 		case 0:
 			// Interrupt.
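The hunk above swaps raw mfc0/mtc0 asm for the cp0_get/cp0_set macros used elsewhere in the tree. A sketch of how such macros could be written; this is an assumption about their shape, not the kernel's actual definition, though the register numbers match the asm being replaced (12 is Status, 13 is Cause):

// Illustrative definitions only; the kernel's real macros may differ.
#define CP0_STATUS 12
#define CP0_CAUSE 13

#define cp0_str_(x) #x
#define cp0_str(x) cp0_str_(x)

// cp0_get (CP0_CAUSE, cause) expands to: mfc0 %0, $13
#define cp0_get(reg, target) \
	__asm__ volatile ("mfc0 %0, $" cp0_str (reg) : "=r" (target))
#define cp0_set(reg, value) \
	__asm__ volatile ("mtc0 %0, $" cp0_str (reg) :: "r" (value))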
@@ -51,12 +53,15 @@ Thread *exception (Thread *current):
 			// TLB modification.
 			panic (0x21223344, "TLB modification.")
 		case 2:
-			unsigned a
-			cp0_get (CP0_EPC, a)
-			panic (a)
+			//unsigned a
+			//cp0_get (CP0_EPC, a)
+			//panic (a)
 			// TLB load or instruction fetch.
 			panic (0x31223344, "TLB load or instruction fetch.")
 		case 3:
+			unsigned a
+			cp0_get (CP0_EPC, a)
+			panic (a)
 			// TLB store.
 			panic (0x41223344, "TLB store.")
 		case 4:
@@ -74,14 +79,15 @@ Thread *exception (Thread *current):
 		case 8:
 			// Syscall.
 			// DEBUG: allow new exceptions.
-			//cp0_set (CP0_STATUS, 0x1000ff00)
-			Thread_arch_invoke ()
+			//cp0_set0 (CP0_STATUS)
+			arch_invoke ()
 			return current
 		case 9:
 			// Breakpoint.
 			panic (0x91223344, "Breakpoint.")
 		case 10:
 			// Reserved instruction.
+			panic (*(unsigned *)0x004000b0)
 			panic (0xa1223344, "Reserved instruction.")
 		case 11:
 			// Coprocessor unusable.
@@ -119,10 +125,12 @@ Thread *exception (Thread *current):
 		case 29:
 		case 31:
 			// Reserved.
-			panic (0xf5223344, "Reserved.")
+			panic (0xf5223344, "Reserved exception code")
+		default:
+			panic (0xf6223344, "Impossible exception code")
 	return current

 /// There's a cache error. Big trouble. Probably not worth trying to recover.
-Thread *cache_error (Thread *current):
+Thread *cache_error ():
 	panic (0x33333333, "cache error")
 	return current
@@ -133,12 +133,12 @@ EXTERN Thread *current

 // Defined in arch.cc
 void Thread_arch_init (Thread *thread)
-void Thread_arch_invoke ()
 void Memory_arch_init (Memory *mem)
 void Memory_arch_free (Memory *mem)
 bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write)
 void Memory_arch_unmap (Memory *mem, Page *page, unsigned address)
 Page *Memory_arch_get_mapping (Memory *mem, unsigned address)
+void arch_invoke ()
 void arch_schedule (Thread *previous, Thread *target)

 bool Memory::map (Page *page, unsigned address, bool write):
mips.ccp (24 lines changed)

@@ -54,6 +54,10 @@ void Memory_arch_free (Memory *mem):
 	mem->zfree ((unsigned)mem->arch.directory)

 bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write):
+	if !mem->arch.directory:
+		mem->arch.directory = (unsigned **)mem->zalloc ()
+		if !mem->arch.directory:
+			return false
 	unsigned *table = mem->arch.directory[(unsigned)address >> 22]
 	if !table:
 		table = (unsigned *)mem->zalloc ()
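The directory allocated above is the top level of a two-level page table: the upper 10 address bits index the per-address-space directory, the next 10 bits index a 1024-entry second-level table, and the low bit of an entry marks a read-only mapping (see the "write ? (unsigned)page : (unsigned)page + 1" line in the next hunk). A plain C++ sketch of that walk; lookup is an illustrative helper, not a kernel function:

// Illustrative: walk the two-level table for a 32-bit virtual address.
unsigned *lookup (unsigned **directory, unsigned address)
{
	unsigned *table = directory[address >> 22];         // top 10 bits
	if (!table)
		return 0;                                   // nothing mapped here
	return &table[(address >> 12) & ((1 << 10) - 1)];   // next 10 bits
}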
@@ -64,6 +68,7 @@ bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write):
 	if table[idx]:
 		mem->unmap ((Page *)(table[idx] & ~3), address)
 	table[idx] = write ? (unsigned)page : (unsigned)page + 1
 	return true

 void Memory_arch_unmap (Memory *mem, Page *page, unsigned address):
 	unsigned *table = mem->arch.directory[(unsigned)address >> 22]
@@ -72,18 +77,15 @@ void Memory_arch_unmap (Memory *mem, Page *page, unsigned address):
 Page *Memory_arch_get_mapping (Memory *mem, unsigned address):
 	unsigned *table = mem->arch.directory[(unsigned)address >> 22]
 	unsigned v = table[((unsigned)address >> 12) & ((1 << 10) - 1)]
 	return (Page *)(v & ~1)

-void Thread_arch_invoke ():
+void arch_invoke ():
 	Capability *target, *c0, *c1, *c2, *c3
-	if current:
-		target = current->address_space->find_capability (current->arch.v0)
-	else:
-		target = NULL
+	target = current->address_space->find_capability (current->arch.v0)
 	if !target:
 		// TODO: there must be no action here. This is just because the rest doesn't work yet.
-		if current:
-			led (current->arch.a0, current->arch.a1, current->arch.a2)
-			dbg_sleep (1000)
+		led (current->arch.a0, current->arch.a1, current->arch.a2)
+		dbg_sleep (1000)
+		schedule ()
 		return
 	c0 = current->address_space->find_capability (current->arch.a0)
@@ -93,9 +95,5 @@ void Thread_arch_invoke ():
 	target->invoke (current->arch.t0, current->arch.t1, current->arch.t2, current->arch.t3, c0, c1, c2, c3)

 void arch_schedule (Thread *previous, Thread *target):
-	if target:
-		cp0_set (CP0_ENTRY_HI, target->address_space->arch.asid)
-	else:
-		// The idle tasks asid is 0.
-		cp0_set (CP0_ENTRY_HI, 0)
+	cp0_set (CP0_ENTRY_HI, target->address_space->arch.asid)
 	// TODO: flush TLB if the asid is already taken.
mips.hhp (8 lines changed)

@@ -63,10 +63,10 @@ EXTERN unsigned g_asid
 // Functions which can be called from assembly must not be mangled.
 extern "C":
 	// Kernel entry points, called from entry.S.
-	Thread *interrupt (Thread *current)
-	Thread *cache_error (Thread *current)
-	Thread *exception (Thread *current)
-	Thread *tlb_refill (Thread *current, unsigned EntryHi)
+	Thread *interrupt ()
+	Thread *cache_error ()
+	Thread *exception ()
+	Thread *tlb_refill ()

 #ifdef INIT
 // Initialize most things (the rest is done in boot.S)
@@ -3,11 +3,10 @@

 void schedule ():
 	Thread *old = current
-	if current:
-		current = current->schedule_next
+	current = current->schedule_next
 	if !current:
 		current = first_scheduled
-		if !current:
-			current = &idle
+	if !current:
+		current = &idle
 	if old != current:
 		arch_schedule (old, current)