#pypp 0
// Iris: micro-kernel for a capability-based operating system.
// init.ccp: mips-specific boot code.
// Copyright 2009 Bas Wijnen <wijnen@debian.org>
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
// Also declare things which only work during kernel init.
#define INIT
#define ARCH
#include "../kernel.hh"
#include <elf.h>

static void init_idle ():
	// initialize idle task as if it is currently running.
	idle.prev = NULL
	idle.next = NULL
	idle.schedule_prev = NULL
	idle.schedule_next = NULL
	idle.address_space = &idle_memory
	idle.refs = NULL
	idle.flags = THREAD_FLAG_RUNNING | THREAD_FLAG_PRIV
	// initialize idle_memory.
	idle_memory.prev = NULL
	idle_memory.next = NULL
	idle_memory.address_space = NULL
	idle_memory.refs = NULL
	idle_memory.pages = &idle_page
	idle_memory.threads = &idle
	idle_memory.memories = NULL
	idle_memory.limit = 0
	idle_memory.used = 0
	idle_memory.arch.directory = (unsigned **)0x80000000
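	// (0x80000000 is the base of kseg0, the unmapped cached segment, so this points at physical address 0.)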
	// Shadow is never used for the idle task.
	idle_memory.arch.shadow = NULL
	idle_memory.arch.asid = 0
	// initialize idle_page
	idle_page.prev = NULL
	idle_page.next = NULL
	idle_page.data.frame = 0x80000000
	idle_page.data.flags = PAGE_FLAG_WRITABLE | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
	idle_page.refs = NULL
	idle_page.address_space = NULL
	current = &idle
	directory = idle_memory.arch.directory

static void init_cp0 ():
	// Set timer to a defined value
	cp0_set (CP0_COMPARE, 1000000)
	// Reset timer
	cp0_set0 (CP0_COUNT)
	// Use the interrupt vector for interrupts
	cp0_set (CP0_CAUSE, 1 << 23)
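	// (Cause.IV is bit 23; with it set, interrupts use the separate vector at offset 0x200 instead of the general exception vector.)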
	// clear the tlb, hardwire page 0 to 0xffffffff
	// and soft-wire a page for the idle task (at idle_entry_hi, below).
	cp0_set (CP0_WIRED, 1)
	cp0_set0 (CP0_PAGE_MASK)
	cp0_set0 (CP0_ENTRY_LO0)
	cp0_set0 (CP0_ENTRY_LO1)
	// Get number of tlb entries (is 31).
	unsigned num
	cp0_get (CP0_CONFIG1, num)
	num >>= 25
	num &= 0x3f
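	// (Config1 bits 30:25 hold the number of TLB entries minus one.)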
	// Clear the tlb.
	for unsigned i = 1; i <= num; ++i:
		// with asid 0, no page faults will be triggered, so it's safe to map memory anywhere.
		cp0_set (CP0_ENTRY_HI, 0x2000 * i)
		cp0_set (CP0_INDEX, i)
		// write the data.
		__asm__ volatile ("tlbwi")
	// Fill the upper page in kseg3.
	cp0_set (CP0_ENTRY_HI, 0xffffe000)
	cp0_set (CP0_ENTRY_LO0, 0x1d)
	cp0_set (CP0_ENTRY_LO1, 0x1f)
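	// (MIPS32 EntryLo: G is bit 0, V bit 1, D bit 2, C bits 3-5, PFN from bit 6 up. 0x1f maps physical
	// page 0 cached, dirty, global and valid; 0x1d is the same but not valid, so only the top page
	// (0xfffff000) ends up mapped.)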
	cp0_set0 (CP0_INDEX)
	__asm__ volatile ("tlbwi")
	// Fill the idle task's page in useg. Set it to non-cacheable.
	// its directory entry is at 1fc, so it's number 7f (0fe00000).
	// its table entry is at 1f8, so it's number 7e (0007e000).
	// its address is 280 (00000280), used below for EPC.
	unsigned const idle_entry_hi = 0x0fe7e000
	cp0_set (CP0_ENTRY_HI, idle_entry_hi)
	cp0_set (CP0_ENTRY_LO0, 0x16)
	cp0_set (CP0_ENTRY_LO1, 0x14)
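	// (0x16: physical page 0, uncached (C=2), dirty, valid, not global; 0x14 is the same but not valid,
	// so only the even page is mapped, tagged with the idle task's ASID 0.)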
	__asm__ volatile ("tlbwr")
	// Allow eret to be used to jump to the idle task.
	cp0_set (CP0_EPC, (idle_entry_hi & PAGE_MASK) | 0x280)
	// Wait with initializing the status register until the last moment, so that
	// exceptions in the bootup code will fill EPC and friends.

// This returns unsigned, because the value is used to fill thread->arch.a*.
static unsigned mkcap (Memory *mem, unsigned type, void *obj):
	return (unsigned)mem->alloc_capability ((Receiver *)type, NULL, &mem->capabilities, (unsigned)obj)

static void init_threads ():
	Thread *previous = NULL
	first_scheduled = NULL
	for unsigned i = 0; i < NUM_THREADS; ++i:
		Memory *mem = top_memory.alloc_memory ()
		Thread *thread = mem->alloc_thread ()
		Page **pages = (Page **)mem->zalloc ()
		Elf32_Ehdr *header = (Elf32_Ehdr *)thread_start[i]
		for unsigned j = 0; j < SELFMAG; ++j:
			if header->e_ident[j] != ELFMAG[j]:
				panic (i * 0x1000 + j, "invalid ELF magic")
		if header->e_ident[EI_CLASS] != ELFCLASS32:
			panic (i * 0x1000 + EI_CLASS, "invalid ELF class")
		if header->e_ident[EI_DATA] != ELFDATA2LSB:
			panic (i * 0x1000 + EI_DATA, "invalid ELF data")
		if header->e_ident[EI_VERSION] != EV_CURRENT:
			panic (i * 0x1000 + EI_VERSION, "invalid ELF version")
		if header->e_type != ET_EXEC:
			panic (i * 0x1000 + 0x10, "invalid ELF type")
		if header->e_machine != EM_MIPS_RS3_LE && header->e_machine != EM_MIPS:
			panic (i * 0x1000 + 0x12, "invalid ELF machine")
		thread->pc = header->e_entry
		thread->sp = 0x80000000
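		// Walk the section headers: allocated sections with file contents are mapped straight from the
		// boot image, while NOBITS (bss) sections get freshly allocated, zeroed pages.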
		for unsigned section = 0; section < header->e_shnum; ++section:
			Elf32_Shdr *shdr = (Elf32_Shdr *)(thread_start[i] + header->e_shoff + section * header->e_shentsize)
			if !(shdr->sh_flags & SHF_ALLOC):
				continue
			bool writable = shdr->sh_flags & SHF_WRITE
			//bool executable = shdr->sh_flags & SHF_EXECINSTR
			if shdr->sh_type != SHT_NOBITS:
				for unsigned p = (shdr->sh_addr & PAGE_MASK); p <= ((shdr->sh_addr + shdr->sh_size - 1) & PAGE_MASK); p += PAGE_SIZE:
					unsigned idx = (p - (shdr->sh_addr & PAGE_MASK)) >> PAGE_BITS
					if !pages[idx]:
						pages[idx] = mem->alloc_page ()
						pages[idx]->data.frame = thread_start[i] + (idx << PAGE_BITS)
						pages[idx]->data.flags = (writable ? PAGE_FLAG_WRITABLE : 0) | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
						++top_memory.limit
					if !mem->map (pages[idx], p, writable):
						panic (0x22446688, "unable to map initial page")
			else:
				for unsigned p = (shdr->sh_addr & PAGE_MASK); p <= ((shdr->sh_addr + shdr->sh_size - 1) & PAGE_MASK); p += PAGE_SIZE:
					bool write = false
					Page *page = mem->get_mapping (p, &write)
					if !page:
						page = mem->alloc_page ()
						if !page:
							panic (0x00220022, "out of memory")
						page->data.frame = mem->zalloc ()
						page->data.flags = (writable ? PAGE_FLAG_WRITABLE : 0) | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
						if !page->data.frame || !mem->map (page, p, true):
							panic (0x33557799, "unable to map initial bss page")
					else:
						if !write:
							panic (0x20203030, "bss section starts on read-only page")
					for unsigned a = p; a < p + PAGE_SIZE; a += 4:
						if a >= shdr->sh_addr + shdr->sh_size:
							break
						if a < shdr->sh_addr:
							continue
						((unsigned *)page->data.frame)[(a & ~PAGE_MASK) >> 2] = 0
		for unsigned p = 0; p <= ((thread_start[i + 1] - thread_start[i] - 1) >> PAGE_BITS); ++p:
			if pages[p]:
				continue
			++top_memory.limit
			top_memory.pfree (thread_start[i] + (p << PAGE_BITS))
		Page *stackpage = mem->alloc_page ()
		// Check the allocation before writing to the page.
		if !stackpage:
			panic (0x13151719, "unable to map initial stack page")
		stackpage->data.frame = mem->zalloc ()
		stackpage->data.flags = PAGE_FLAG_WRITABLE | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
		if !mem->map (stackpage, 0x7ffff000, true):
			panic (0x13151719, "unable to map initial stack page")
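		// Hand the new thread its initial capabilities in a0-a3: its own receiver, itself, its address
		// space, and a call-only variant of the receiver.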
		Receiver *recv = mem->alloc_receiver ()
		recv->owner = thread
		thread->receivers = recv
		thread->arch.a0 = mkcap (mem, CAPTYPE_RECEIVER | CAP_RECEIVER_ALL_RIGHTS, recv)
		thread->arch.a1 = mkcap (mem, CAPTYPE_THREAD | CAP_THREAD_ALL_PRIV_RIGHTS, thread)
		thread->arch.a2 = mkcap (mem, CAPTYPE_MEMORY | CAP_MEMORY_ALL_RIGHTS, mem)
		thread->arch.a3 = mkcap (mem, CAPTYPE_RECEIVER | (1 << CAP_RECEIVER_CALL), recv)
		mem->pfree ((unsigned)pages)
		thread->flags = THREAD_FLAG_RUNNING | THREAD_FLAG_PRIV
		thread->schedule_next = NULL
		thread->schedule_prev = previous
		if previous:
			previous->schedule_next = thread
		else:
			first_scheduled = thread
		previous = thread

// Initialize the kernel, finish by falling into the idle task.
extern unsigned _end
void init ():
	// Disable interrupts and set interrupt vectors to normal.
	cp0_set0 (CP0_STATUS)
	// Initialize kernel variables to empty.
	sleepers = NULL
	runners = NULL
	zero_pages = NULL
	// Fill junk pages with all memory not currently used.
	junk_pages = (FreePage *)(((unsigned)&_end + ~PAGE_MASK) & PAGE_MASK)
	FreePage *p, *next
	unsigned count = 1
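	// (All RAM is reached through kseg0 at 0x80000000; the loop below assumes 128 MiB (1 << 27) of
	// memory and links every page from _end upward into the free list.)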
	for p = junk_pages, next = p; (unsigned)next - 0x80000000 < (1 << 27); p = next, next = (FreePage *)((unsigned)p + ~PAGE_MASK + 1):
		p->next = next
		++count
	p->next = NULL
	// initialize system control coprocessor.
	init_cp0 ()
	// initialize everything about the idle task.
	init_idle ()
	// initialize top_memory.
	top_memory.prev = NULL
	top_memory.next = NULL
	top_memory.address_space = NULL
	top_memory.refs = NULL
	top_memory.pages = NULL
	top_memory.threads = NULL
	top_memory.memories = NULL
	top_memory.limit = count
	top_memory.used = 0
	top_memory.arch.directory = NULL
	top_memory.arch.asid = 0
	for unsigned i = 0; i < 63; ++i:
		asids[i] = i + 1
	asids[63] = 0
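	// (This apparently sets up asids[] as a free list: each entry holds the next ASID to hand out, with
	// 0 terminating the chain, so ASID 0 itself stays reserved for the idle task.)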
	init_threads ()

	// Say we're handling an exception. Don't enable interrupts; this will happen when handlers are registered.
	// Since we're going to enter the idle task, allow access to cp0.
	cp0_set (CP0_STATUS, 0x10000013)
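	// (0x10000013 = CU0 | UM | EXL | IE: the eret below clears EXL and drops into user mode with
	// coprocessor 0 access enabled; the interrupt mask bits stay 0, so nothing can fire yet.)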

	// Done; return to user space (the idle task).
	__asm__ volatile ("eret")