#pypp 0
// Iris: micro-kernel for a capability-based operating system.
// mips/init.ccp: mips-specific boot code.
// Copyright 2009 Bas Wijnen <wijnen@debian.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.

// Also declare things which only work during kernel init.
#define INIT
#define ARCH
#include "../kernel.hh"
#include <elf.h>

#define NUM_SLOTS 4
#define NUM_CAPS 16

static void init_idle ():
	// initialize idle task as if it is currently running.
	idle.prev = NULL
	idle.next = NULL
	idle.schedule_prev = NULL
	idle.schedule_next = NULL
	idle.address_space = &idle_memory
	idle.refs.reset ()
	idle.flags = Kernel::Thread::RUNNING | Kernel::Thread::PRIV
	// initialize idle_memory.
	idle_memory.prev = NULL
	idle_memory.next = NULL
	idle_memory.address_space = NULL
	idle_memory.refs.reset ()
	idle_memory.pages = &idle_page
	idle_memory.threads = &idle
	idle_memory.memories = NULL
	idle_memory.limit = 0
	idle_memory.used = 0
	idle_memory.arch.directory = (unsigned **)0x80000000
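	// 0x80000000 is the base of kseg0 (physical address 0 seen through the
	// unmapped, cached segment), so the idle directory sits in the first
	// physical page.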
	// Shadow is never used for the idle task.
	idle_memory.arch.shadow = NULL
	idle_memory.arch.asid = 0
	// initialize idle_page
	idle_page.prev = NULL
	idle_page.next = NULL
	idle_page.frame = 0x80000000
	idle_page.flags = Kernel::Page::PAYING | Kernel::Page::FRAME
	idle_page.refs.reset ()
	idle_page.address_space = NULL
	current = &idle
	directory = idle_memory.arch.directory

static void init_cp0 ():
	// Disable watchpoint interrupts.
	cp0_set0 (CP0_WATCH_LO)
	// Use the interrupt vector for interrupts; clear interrupt pending flags.
	cp0_set (CP0_CAUSE, 1 << 23)
	// Disable interrupts and set interrupt vectors to normal.
	cp0_set0 (CP0_STATUS)
	// Reset exception base address.
	cp0_set0 (CP0_EBASE)
	// Use non-vectored interrupts.
	cp0_set0 (CP0_INT_CTL)

	// clear the tlb, hardwire page 0 to 0xffffffff
	// and soft-wire it to (0x294 << 20) + (0x290 << 10)
	// (for the idle task).

	cp0_set (CP0_WIRED, 1)
	cp0_set0 (CP0_PAGE_MASK)
	cp0_set0 (CP0_ENTRY_LO0)
	cp0_set0 (CP0_ENTRY_LO1)
	// Get number of tlb entries (is 31).
	unsigned num
	cp0_get (CP0_CONFIG1, num)
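	// Config1 bits 30:25 (MMUSize) hold the number of TLB entries minus one,
	// so after the shift and mask below num is the highest valid TLB index.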
	num >>= 25
	num &= 0x3f
	// Clear the tlb.
	for unsigned i = 1; i <= num; ++i:
		// with asid 0, no page faults will be triggered, so it's safe to map memory anywhere.
		cp0_set (CP0_ENTRY_HI, 0x2000 * i)
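		// Each entry gets a distinct VPN2 (i * 0x2000); overlapping TLB entries
		// can trigger a machine check on MIPS, so even invalid entries must not match.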
		cp0_set (CP0_INDEX, i)
		// write the data.
		__asm__ volatile ("tlbwi")
	// Fill the upper page in kseg3.
	cp0_set (CP0_ENTRY_HI, 0xffffe000)
	cp0_set (CP0_ENTRY_LO0, 0x1d)
	cp0_set (CP0_ENTRY_LO1, 0x1f)
	cp0_set0 (CP0_INDEX)
	__asm__ volatile ("tlbwi")
	// Fill the idle task's page in useg. Set it to non-cachable.
	// its directory entry is at 1fc, so it's number 7f (0fe00000).
	// its table entry is at 1f8, so it's number 7e (0007e000).
	// its address is 280 (00000280), used below for EPC.
	unsigned const idle_entry_hi = 0x0fe7e000
	cp0_set (CP0_ENTRY_HI, idle_entry_hi)
	cp0_set (CP0_ENTRY_LO0, 0x16)
	cp0_set (CP0_ENTRY_LO1, 0x14)
	__asm__ volatile ("tlbwr")
	// Allow eret to be used to jump to the idle task.
	cp0_set (CP0_EPC, (idle_entry_hi & PAGE_MASK) | 0x280)
	// Wait with initializing the status register until the last moment, so that
	// exceptions in the bootup code will fill EPC and friends.

static void init_threads ():
	kThread *previous = NULL
	first_scheduled = NULL
	first_alarm = NULL
	kReceiver *init_receiver = NULL
	for unsigned i = 0; i < NUM_THREADS; ++i:
		kMemory *mem = top_memory.alloc_memory ()
		assert (mem)
		kThread *thread = mem->alloc_thread (NUM_SLOTS)
		kPage **pages = (kPage **)mem->zalloc ()
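		// pages[] lives in a single zeroed page, so it can track at most
		// PAGE_SIZE / 4 frames of this boot image; entries are filled in as image
		// frames are reused as process pages, and untouched frames are freed below.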
		Elf32_Ehdr *header = (Elf32_Ehdr *)thread_start[i]
		for unsigned j = 0; j < SELFMAG; ++j:
			if header->e_ident[j] != ELFMAG[j]:
				panic (i * 0x1000 + j, "invalid ELF magic")
				return
		if header->e_ident[EI_CLASS] != ELFCLASS32:
			panic (i * 0x1000 + EI_CLASS, "invalid ELF class")
			return
		if header->e_ident[EI_DATA] != ELFDATA2LSB:
			panic (i * 0x1000 + EI_DATA, "invalid ELF data")
			return
		if header->e_ident[EI_VERSION] != EV_CURRENT:
			panic (i * 0x1000 + EI_VERSION, "invalid ELF version")
			return
		if header->e_type != ET_EXEC:
			panic (i * 0x1000 + 0x10, "invalid ELF type")
			return
		if header->e_machine != EM_MIPS_RS3_LE && header->e_machine != EM_MIPS:
			panic (i * 0x1000 + 0x12, "invalid ELF machine")
			return
		thread->pc = header->e_entry
		thread->sp = 0x80000000
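		// The stack pointer starts at the top of user space; a single stack page
		// is mapped just below it (at 0x7ffff000) further down.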
		for unsigned section = 0; section < header->e_shnum; ++section:
			Elf32_Shdr *shdr = (Elf32_Shdr *)(thread_start[i] + header->e_shoff + section * header->e_shentsize)
			if ~shdr->sh_flags & SHF_ALLOC:
				continue
			bool readonly = !(shdr->sh_flags & SHF_WRITE)
			//bool executable = shdr->sh_flags & SHF_EXEC_INSTR
			if shdr->sh_type != SHT_NOBITS:
				unsigned file_offset = shdr->sh_offset >> PAGE_BITS
				if ((file_offset + shdr->sh_size) >> PAGE_BITS) >= (PAGE_SIZE >> 2):
					panic (0x87446809, "initial thread too large")
					return
				for unsigned p = (shdr->sh_addr & PAGE_MASK); p < shdr->sh_addr + shdr->sh_size; p += PAGE_SIZE:
					unsigned section_offset = (p - (shdr->sh_addr & PAGE_MASK)) >> PAGE_BITS
					unsigned idx = file_offset + section_offset
					if !pages[idx]:
						pages[idx] = mem->alloc_page ()
						pages[idx]->frame = thread_start[i] + (idx << PAGE_BITS)
						pages[idx]->flags = Kernel::Page::PAYING | Kernel::Page::FRAME
						++top_memory.limit
						mem->use ()
						if !mem->map (pages[idx], p, readonly):
							panic (0x22446688, "unable to map initial page")
							return
			else:
				if readonly:
					panic (0x33399993, "unwritable bss section")
					return
				for unsigned p = (shdr->sh_addr & PAGE_MASK); p < shdr->sh_addr + shdr->sh_size; p += PAGE_SIZE:
					kPage *page = mem->get_mapping (p, &readonly)
					if !page:
						page = mem->alloc_page ()
						if !page:
							panic (0x00220022, "out of memory")
							return
						page->frame = mem->zalloc ()
						if !page->frame:
							panic (0x02220022, "out of memory")
							return
						page->flags = Kernel::Page::PAYING | Kernel::Page::FRAME
						if !mem->map (page, p):
							panic (0x33557799, "unable to map initial bss page")
							return
					else:
						if readonly:
							panic (0x20203030, "bss section starts on read-only page")
							return
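						// The page is already mapped and holds file-backed data, so
						// only the words that fall inside this bss section are cleared.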
						for unsigned a = p; a < ((p + PAGE_SIZE) & PAGE_MASK); a += 4:
							if a >= shdr->sh_addr + shdr->sh_size:
								break
							if a < shdr->sh_addr:
								continue
							((unsigned *)page->frame)[(a & ~PAGE_MASK) >> 2] = 0
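		// Frames of the boot image that were not reused as process pages are
		// handed back to top_memory; its limit grows by one page per returned frame.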
		for unsigned p = 0; p <= ((thread_start[i + 1] - thread_start[i] - 1) >> PAGE_BITS); ++p:
			if pages[p]:
				continue
			++top_memory.limit
			top_memory.pfree (thread_start[i] + (p << PAGE_BITS))
		kPage *stackpage = mem->alloc_page ()
		// Check the allocation before touching the page.
		if !stackpage:
			panic (0x13151719, "unable to map initial stack page")
			return
		stackpage->frame = mem->zalloc ()
		stackpage->flags = Kernel::Page::PAYING | Kernel::Page::FRAME
		if !mem->map (stackpage, 0x7ffff000):
			panic (0x13151719, "unable to map initial stack page")
			return
		thread->slot[0].caps = mem->alloc_caps (NUM_CAPS)
		thread->slot[0].caps->first_slot.thread = thread
		thread->slot[0].caps->first_slot.index = 0
		thread->arch.a[0] = NUM_SLOTS
		thread->arch.a[1] = NUM_CAPS
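		// arch.a[0] and arch.a[1] are presumably the MIPS argument registers, so
		// the thread starts with its slot and capability counts as arguments.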
		kReceiver *recv = mem->alloc_receiver ()
		recv->owner = thread
		thread->receivers = recv
		thread->slot[0].caps->set (__receiver_num, (kReceiverP)(CAPTYPE_RECEIVER | CAP_MASTER), Kernel::Num ((unsigned)recv), kCapRef (), &recv->refs)
		thread->slot[0].caps->set (__thread_num, (kReceiverP)(CAPTYPE_THREAD | CAP_MASTER), Kernel::Num ((unsigned)thread), kCapRef (), &thread->refs)
		thread->slot[0].caps->set (__memory_num, (kReceiverP)(CAPTYPE_MEMORY | CAP_MASTER), Kernel::Num ((unsigned)mem), kCapRef (), &mem->refs)
		thread->slot[0].caps->set (__call_num, (kReceiverP)(CAPTYPE_RECEIVER | Kernel::Receiver::CALL), Kernel::Num ((unsigned)recv), kCapRef (), &recv->refs)
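		// The thread's initial capability store thus holds master capabilities to
		// its own receiver, thread and memory, plus a call capability on the receiver.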
		thread->flags = Kernel::Thread::RUNNING | Kernel::Thread::PRIV
		if !i:
			first_scheduled = thread
			init_receiver = recv
		else:
			thread->slot[0].caps->set (__parent_num, init_receiver, i, kCapRef (), &init_receiver->capabilities)
			previous->schedule_next = thread
		thread->schedule_prev = previous
		thread->schedule_next = NULL
		previous = thread
		mem->pfree ((unsigned)pages)

// Initialize the kernel, finish by falling into the idle task.
void init (unsigned mem):
	#ifndef NDEBUG
	// Initialize board-specific things.
	board_init ()
	#endif
	must_wait = false
	// Initialize kernel variables to empty.
	unsigned count = init_memory (mem)
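	// count is presumably the number of usable memory pages found by init_memory;
	// it becomes top_memory's limit below.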
	// initialize system control coprocessor.
	init_cp0 ()
	// initialize everything about the idle task.
	init_idle ()
	// initialize top_memory.
	top_memory.prev = NULL
	top_memory.next = NULL
	top_memory.address_space = NULL
	top_memory.refs.reset ()
	top_memory.pages = NULL
	top_memory.threads = NULL
	top_memory.memories = NULL
	top_memory.limit = count
	top_memory.used = 0
	top_memory.arch.directory = NULL
	top_memory.arch.asid = 0

	// Record all asids as unused.
	for unsigned i = 0; i < 63; ++i:
		asids[i] = i + 1
	asids[63] = 0
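	// asids[] forms a chain of free ASIDs: each slot holds the next free ASID,
	// and slot 63 holds 0, presumably marking the end of the chain (ASID 0 stays
	// reserved for the idle task).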

	// Set up initial threads.
	init_threads ()

	// Unset all interrupt handlers.
	for unsigned i = 0; i < 32; ++i:
		arch_interrupt_receiver[i] = NULL

	// Enable timer interrupts.
	intc_unmask_irq (TIMER_INTERRUPT)

	// Say we're handling an exception. Since we're going to enter the idle task, allow access to cp0.
	// All interrupts enter the CPU through the interrupt controller at IP2, so enable that.
	cp0_set (CP0_STATUS, 0x1000ff13)
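	// 0x1000ff13 = CU0 | IM7..IM0 | UM | EXL | IE: coprocessor 0 usable from user
	// mode, all interrupt mask bits set, user mode and exception level selected,
	// interrupts enabled (they stay masked while EXL is set, until the eret below).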

	// Done; return to user space (the idle task).
	__asm__ volatile ("eret")