1
0
mirror of git://projects.qi-hardware.com/iris.git synced 2024-11-05 04:53:08 +02:00
iris/init.ccp
Bas Wijnen ef1b9bfe10 more
2009-06-01 01:12:54 +02:00

224 lines
8.1 KiB
C++ (pypp indented syntax)

#pypp 0
// Also declare things which only work during kernel init.
#define INIT
#define ARCH
#include "kernel.hh"
#include "elf.h"
// Hand-initialize the idle task's thread (idle), its address space
// (idle_memory) and its single page (idle_page), as if the idle task is the
// thread currently running.  Everything is filled in statically; no
// allocation happens here.  Finishes by making the idle task current.
static void init_idle ():
// initialize idle task as if it is currently running.
// The idle thread is not linked into any thread or scheduler list.
idle.prev = NULL
idle.next = NULL
idle.schedule_prev = NULL
idle.schedule_next = NULL
idle.address_space = &idle_memory
idle.refs = NULL
// The idle task is always runnable and has kernel privileges.
idle.flags = THREAD_FLAG_RUNNING | THREAD_FLAG_PRIV
// initialize idle_memory.
idle_memory.prev = NULL
idle_memory.next = NULL
idle_memory.address_space = NULL
idle_memory.refs = NULL
idle_memory.pages = &idle_page
idle_memory.threads = &idle
idle_memory.memories = NULL
// No memory quota: the idle task never allocates (limit == used == 0).
idle_memory.limit = 0
idle_memory.used = 0
// NOTE(review): 0x80000000 is the base of kseg0 on MIPS, so the directory
// presumably lives at the very bottom of physical memory -- confirm.
idle_memory.arch.directory = (unsigned **)0x80000000
// Shadow is never used for the idle task.
idle_memory.arch.shadow = NULL
// asid 0 belongs to the idle task; init_cp0 relies on asid 0 never faulting.
idle_memory.arch.asid = 0
// initialize idle_page
idle_page.prev = NULL
idle_page.next = NULL
idle_page.data.frame = 0x80000000
idle_page.data.flags = PAGE_FLAG_WRITABLE | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
idle_page.refs = NULL
idle_page.address_space = NULL
// The kernel starts out running as the idle task; publish its state in the
// globals the rest of the kernel uses.
current = &idle
directory = idle_memory.arch.directory
// Configure the system control coprocessor (cp0): the timer, the interrupt
// vector mode, and the initial TLB contents -- one wired kernel page at the
// top of kseg3 plus the idle task's single page in useg.  Also preloads EPC
// so init () can eret straight into the idle task.
static void init_cp0 ():
// Set timer to a defined value
cp0_set (CP0_COMPARE, 1000000)
// Reset timer
cp0_set0 (CP0_COUNT)
// Use the interrupt vector for interrupts
// NOTE(review): 1 << 23 is presumably the Cause.IV bit (use the special
// interrupt vector) -- confirm against the MIPS32 PRA manual.
cp0_set (CP0_CAUSE, 1 << 23)
// clear the tlb, hardwire page 0 to 0xffffffff
// and soft-wire it to (0x294 << 20) + (0x290 << 10)
// (for the idle task).
// Wired = 1 protects entry 0 (written below) from the random tlbwr.
cp0_set (CP0_WIRED, 1)
// Page mask 0 selects the base page size; both EntryLo halves are left
// invalid for the clearing loop below.
cp0_set0 (CP0_PAGE_MASK)
cp0_set0 (CP0_ENTRY_LO0)
cp0_set0 (CP0_ENTRY_LO1)
// Get number of tlb entries (is 31).
// Config1 bits 30:25 hold MMUSize - 1.
unsigned num
cp0_get (CP0_CONFIG1, num)
num >>= 25
num &= 0x3f
// Clear the tlb.
for unsigned i = 1; i <= num; ++i:
// with asid 0, no page faults will be triggered, so it's safe to map memory anywhere.
// Each entry gets a distinct virtual address (0x2000 * i) so no two entries
// can ever match the same address.
cp0_set (CP0_ENTRY_HI, 0x2000 * i)
cp0_set (CP0_INDEX, i)
// write the data.
__asm__ volatile ("tlbwi")
// Fill the upper page in kseg3.
// This writes the wired entry 0 (Index was zeroed).
// NOTE(review): the low EntryLo bits (0x1d / 0x1f) encode cache attribute,
// dirty, valid and global flags -- confirm their exact meaning.
cp0_set (CP0_ENTRY_HI, 0xffffe000)
cp0_set (CP0_ENTRY_LO0, 0x1d)
cp0_set (CP0_ENTRY_LO1, 0x1f)
cp0_set0 (CP0_INDEX)
__asm__ volatile ("tlbwi")
// Fill the idle task's page in useg. Set it to non-cachable.
// its directory entry is at 1fc, so it's number 7f (0fe00000).
// its table entry is at 1f8, so it's number 7e (0007e000).
// its address is 280 (00000280), used below for EPC.
unsigned const idle_entry_hi = 0x0fe7e000
cp0_set (CP0_ENTRY_HI, idle_entry_hi)
cp0_set (CP0_ENTRY_LO0, 0x16)
cp0_set (CP0_ENTRY_LO1, 0x14)
// tlbwr writes a random non-wired entry; entry 0 above stays intact.
__asm__ volatile ("tlbwr")
// Allow eret to be used to jump to the idle task.
cp0_set (CP0_EPC, (idle_entry_hi & PAGE_MASK) | 0x280)
// Wait with initializing the status register until the last moment, so that
// exceptions in the bootup code will fill EPC and friends.
// Create a capability of the given type (a CAPTYPE_* value with rights bits
// or'ed in, as passed by init_threads) for obj, stored in mem's capability
// list.  The type is smuggled through alloc_capability's Receiver* parameter.
// This returns unsigned, because the value is used to fill thread->arch.a*.
static unsigned mkcap (Memory *mem, unsigned type, void *obj):
return (unsigned)mem->alloc_capability ((Receiver *)type, NULL, &mem->capabilities, (unsigned)obj)
// Create the initial (boot) threads from the ELF images the boot loader left
// in memory.  For each image in thread_start[]:
// - allocate an address space and a thread from top_memory,
// - sanity-check the ELF header,
// - map the SHF_ALLOC sections: loaded sections map the image frames
//   directly; SHT_NOBITS (bss) pages are allocated and zero-filled,
// - return unused image frames to top_memory,
// - allocate a stack page and the initial capabilities in a0..a3,
// - link the thread into the scheduler list headed by first_scheduled.
// Fix over the original: the stack page is NULL-checked *before* it is
// dereferenced (the old code wrote stackpage->data.* first and only then
// tested stackpage), and its zalloc'd frame is checked like the bss path.
static void init_threads ():
	Thread *previous = NULL
	first_scheduled = NULL
	for unsigned i = 0; i < NUM_THREADS; ++i:
		Memory *mem = top_memory.alloc_memory ()
		Thread *thread = mem->alloc_thread ()
		// Temporary bookkeeping: one Page pointer per page of the image.
		Page **pages = (Page **)mem->zalloc ()
		Elf32_Ehdr *header = (Elf32_Ehdr *)thread_start[i]
		// Verify this is a 32-bit little-endian MIPS executable.
		for unsigned j = 0; j < SELFMAG; ++j:
			if header->e_ident[j] != ELFMAG[j]:
				panic (i * 0x1000 + j, "invalid ELF magic")
		if header->e_ident[EI_CLASS] != ELFCLASS32:
			panic (i * 0x1000 + EI_CLASS, "invalid ELF class")
		if header->e_ident[EI_DATA] != ELFDATA2LSB:
			panic (i * 0x1000 + EI_DATA, "invalid ELF data")
		if header->e_ident[EI_VERSION] != EV_CURRENT:
			panic (i * 0x1000 + EI_VERSION, "invalid ELF version")
		if header->e_type != ET_EXEC:
			panic (i * 0x1000 + 0x10, "invalid ELF type")
		if header->e_machine != EM_MIPS_RS3_LE && header->e_machine != EM_MIPS:
			panic (i * 0x1000 + 0x12, "invalid ELF machine")
		thread->pc = header->e_entry
		thread->sp = 0x80000000
		for unsigned section = 0; section < header->e_shnum; ++section:
			Elf32_Shdr *shdr = (Elf32_Shdr *)(thread_start[i] + header->e_shoff + section * header->e_shentsize)
			if !(shdr->sh_flags & SHF_ALLOC):
				continue
			bool writable = shdr->sh_flags & SHF_WRITE
			//bool executable = shdr->sh_flags & SHF_EXEC_INSTR
			if shdr->sh_type != SHT_NOBITS:
				// Loaded section: map the boot image's own frames into place.
				for unsigned p = (shdr->sh_addr & PAGE_MASK); p <= ((shdr->sh_addr + shdr->sh_size - 1) & PAGE_MASK); p += PAGE_SIZE:
					unsigned idx = (p - (shdr->sh_addr & PAGE_MASK)) >> PAGE_BITS
					if !pages[idx]:
						pages[idx] = mem->alloc_page ()
						pages[idx]->data.frame = thread_start[i] + (idx << PAGE_BITS)
						pages[idx]->data.flags = (writable ? PAGE_FLAG_WRITABLE : 0) | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
						// The frame comes from the image, not the quota;
						// compensate top_memory's limit for the accounting.
						++top_memory.limit
					if !mem->map (pages[idx], p, writable):
						panic (0x22446688, "unable to map initial page")
			else:
				// bss: allocate fresh zeroed pages, or reuse a page already
				// mapped by a loaded section that shares it.
				for unsigned p = (shdr->sh_addr & PAGE_MASK); p <= ((shdr->sh_addr + shdr->sh_size - 1) & PAGE_MASK); p += PAGE_SIZE:
					bool write = false
					Page *page = mem->get_mapping (p, &write)
					if !page:
						page = mem->alloc_page ()
						if !page:
							panic (0x00220022, "out of memory")
						page->data.frame = mem->zalloc ()
						page->data.flags = (writable ? PAGE_FLAG_WRITABLE : 0) | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
						if !page->data.frame || !mem->map (page, p, true):
							panic (0x33557799, "unable to map initial bss page")
					else:
						if !write:
							panic (0x20203030, "bss section starts on read-only page")
					// Zero only the part of the page inside this section;
					// shared pages may carry loaded data outside it.
					for unsigned a = p; a < p + PAGE_SIZE; a += 4:
						if a >= shdr->sh_addr + shdr->sh_size:
							break
						if a < shdr->sh_addr:
							continue
						((unsigned *)page->data.frame)[(a & ~PAGE_MASK) >> 2] = 0
		// Return image frames that no section claimed to the free pool.
		for unsigned p = 0; p <= ((thread_start[i + 1] - thread_start[i] - 1) >> PAGE_BITS); ++p:
			if pages[p]:
				continue
			++top_memory.limit
			top_memory.pfree (thread_start[i] + (p << PAGE_BITS))
		// Stack page at the top of useg (sp starts at 0x80000000 and grows
		// down into this page).  Check the allocation before touching it.
		Page *stackpage = mem->alloc_page ()
		if !stackpage:
			panic (0x13151719, "unable to map initial stack page")
		stackpage->data.frame = mem->zalloc ()
		stackpage->data.flags = PAGE_FLAG_WRITABLE | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
		if !stackpage->data.frame || !mem->map (stackpage, 0x7ffff000, true):
			panic (0x13151719, "unable to map initial stack page")
		// Initial capabilities, passed to the thread in registers a0..a3.
		Receiver *recv = mem->alloc_receiver ()
		thread->arch.a0 = mkcap (mem, CAPTYPE_RECEIVER | CAP_RECEIVER_ALL_RIGHTS, recv)
		thread->arch.a1 = mkcap (mem, CAPTYPE_THREAD | CAP_THREAD_ALL_PRIV_RIGHTS, thread)
		thread->arch.a2 = mkcap (mem, CAPTYPE_MEMORY | CAP_MEMORY_ALL_RIGHTS, mem)
		thread->arch.a3 = mkcap (mem, CAPTYPE_RECEIVER | CAP_RECEIVER_CALL, recv)
		// The temporary pages array is no longer needed.
		mem->pfree ((unsigned)pages)
		thread->flags = THREAD_FLAG_RUNNING | THREAD_FLAG_PRIV
		// Append to the doubly-linked scheduler list.
		thread->schedule_next = NULL
		thread->schedule_prev = previous
		if previous:
			previous->schedule_next = thread
		else:
			first_scheduled = thread
		previous = thread
// Initialize the kernel, finish by falling into the idle task.
// _end is provided by the linker script: the first address after the kernel.
extern unsigned _end
// Kernel entry point after boot: builds the free-page pool, sets up cp0, the
// idle task, top_memory, the asid free list and the boot threads, then erets
// into the idle task in user space.  Ordering matters throughout: the status
// register is only finalized at the very end (see the note in init_cp0).
void init ():
// Disable interrupts and set interrupt vectors to normal.
cp0_set0 (CP0_STATUS)
// Initialize kernel variables to empty.
sleepers = NULL
runners = NULL
zero_pages = NULL
// Fill junk pages with all memory not currently used.
// Free memory starts at the first page boundary at or after _end and runs to
// the end of RAM (128 MB above the kseg0 base 0x80000000).
junk_pages = (FreePage *)(((unsigned)&_end + ~PAGE_MASK) & PAGE_MASK)
FreePage *p, *next
unsigned count = 1
// Link each free page to the next and count them for top_memory's quota.
// NOTE(review): the first iteration has next == p, so p->next is initially
// set to itself and only fixed up by the second iteration, and count starts
// at 1 -- verify the final count is exact and not off by one.
for p = junk_pages, next = p; (unsigned)next - 0x80000000 < (1 << 27); p = next, next = (FreePage *)((unsigned)p + ~PAGE_MASK + 1):
p->next = next
++count
p->next = NULL
// initialize system control coprocessor.
init_cp0 ()
// initialize everything about the idle task.
init_idle ()
// initialize top_memory.
// top_memory is the root address space: it owns all counted free pages and
// is the parent from which init_threads allocates everything.
top_memory.prev = NULL
top_memory.next = NULL
top_memory.address_space = NULL
top_memory.refs = NULL
top_memory.pages = NULL
top_memory.threads = NULL
top_memory.memories = NULL
top_memory.limit = count
top_memory.used = 0
top_memory.arch.directory = NULL
top_memory.arch.asid = 0
// Build the asid list: each slot points at the next; slot 63 closes the
// chain with 0.  NOTE(review): presumably a free list of the 63 usable
// asids (asid 0 is the idle task's) -- confirm against the asid allocator.
for unsigned i = 0; i < 63; ++i:
asids[i] = i + 1
asids[63] = 0
init_threads ()
// Say we're handling an exception. Don't enable interrupts; this will happen when handlers are registered.
// Since we're going to enter the idle task, allow access to cp0.
cp0_set (CP0_STATUS, 0x10000013)
// Done; return to user space (the idle task). EPC was set in init_cp0.
__asm__ volatile ("eret")