#pypp 0
// Iris: micro-kernel for a capability-based operating system.
// mips/init.ccp: mips-specific boot code.
// Copyright 2009 Bas Wijnen
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program.  If not, see <http://www.gnu.org/licenses/>.

// Also declare things which only work during kernel init.
#define INIT
#define ARCH
#include "../kernel.hh"
#include <elf.h>

static void init_idle ():
	// initialize idle task as if it is currently running.
	idle.prev = NULL
	idle.next = NULL
	idle.schedule_prev = NULL
	idle.schedule_next = NULL
	idle.address_space = &idle_memory
	idle.refs = NULL
	idle.flags = THREAD_FLAG_RUNNING | THREAD_FLAG_PRIV
	// initialize idle_memory.
	idle_memory.prev = NULL
	idle_memory.next = NULL
	idle_memory.address_space = NULL
	idle_memory.refs = NULL
	idle_memory.pages = &idle_page
	idle_memory.threads = &idle
	idle_memory.memories = NULL
	idle_memory.limit = 0
	idle_memory.used = 0
	idle_memory.arch.directory = (unsigned **)0x80000000
	// Shadow is never used for the idle task.
	idle_memory.arch.shadow = NULL
	idle_memory.arch.asid = 0
	// initialize idle_page.
	idle_page.prev = NULL
	idle_page.next = NULL
	idle_page.data.frame = 0x80000000
	idle_page.data.flags = PAGE_FLAG_WRITABLE | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
	idle_page.refs = NULL
	idle_page.address_space = NULL
	current = &idle
	directory = idle_memory.arch.directory

static void init_cp0 ():
	// Set timer to a defined value.
	cp0_set (CP0_COMPARE, 1000000)
	// Reset timer.
	cp0_set0 (CP0_COUNT)
	// Use the interrupt vector for interrupts.
	cp0_set (CP0_CAUSE, 1 << 23)
	// Clear the tlb, hardwire page 0 to 0xffffffff
	// and soft-wire it to (0x294 << 20) + (0x290 << 10)
	// (for the idle task).
	cp0_set (CP0_WIRED, 1)
	cp0_set0 (CP0_PAGE_MASK)
	cp0_set0 (CP0_ENTRY_LO0)
	cp0_set0 (CP0_ENTRY_LO1)
	// Get the number of tlb entries (it is 31).
	unsigned num
	cp0_get (CP0_CONFIG1, num)
	num >>= 25
	num &= 0x3f
	// Clear the tlb.
	for unsigned i = 1; i <= num; ++i:
		// With asid 0, no page faults will be triggered, so it's safe to map memory anywhere.
		cp0_set (CP0_ENTRY_HI, 0x2000 * i)
		cp0_set (CP0_INDEX, i)
		// Write the data.
		__asm__ volatile ("tlbwi")
	// Fill the upper page in kseg3.
	cp0_set (CP0_ENTRY_HI, 0xffffe000)
	cp0_set (CP0_ENTRY_LO0, 0x1d)
	cp0_set (CP0_ENTRY_LO1, 0x1f)
	cp0_set0 (CP0_INDEX)
	__asm__ volatile ("tlbwi")
	// Fill the idle task's page in useg.  Set it to non-cachable.
	// Its directory entry is at 1fc, so it's number 7f (0fe00000).
	// Its table entry is at 1f8, so it's number 7e (0007e000).
	// Its address is 280 (00000280), used below for EPC.
	unsigned const idle_entry_hi = 0x0fe7e000
	cp0_set (CP0_ENTRY_HI, idle_entry_hi)
	cp0_set (CP0_ENTRY_LO0, 0x16)
	cp0_set (CP0_ENTRY_LO1, 0x14)
	__asm__ volatile ("tlbwr")
	// Allow eret to be used to jump to the idle task.
	cp0_set (CP0_EPC, (idle_entry_hi & PAGE_MASK) | 0x280)
	// Wait with initializing the status register until the last moment, so that
	// exceptions in the bootup code will fill EPC and friends.
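// Note on the tlb values written in init_cp0 above, assuming the standard mips32
// EntryHi/EntryLo layout: EntryLo packs the physical frame number in bits 31..6,
// the cache attribute in bits 5..3, and the D (writable), V (valid) and G (global)
// flags in bits 2..0; EntryHi holds the virtual page-pair address plus the asid.
// So 0x1d/0x1f describe a cached, writable, global pair with only the odd page
// (0xfffff000 in kseg3) valid, and 0x16/0x14 describe an uncached, writable,
// non-global pair with only the even page (the idle task's page) valid.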
// This returns unsigned, because the value is used to fill thread->arch.a*.
static unsigned mkcap (Memory *mem, unsigned type, void *obj):
	return (unsigned)mem->alloc_capability ((Receiver *)type, NULL, &mem->capabilities, (unsigned)obj)

static void init_threads ():
	Thread *previous = NULL
	first_scheduled = NULL
	first_alarm = NULL
	Receiver *init_receiver = NULL
	for unsigned i = 0; i < NUM_THREADS; ++i:
		Memory *mem = top_memory.alloc_memory ()
		assert (mem)
		Thread *thread = mem->alloc_thread ()
		Page **pages = (Page **)mem->zalloc ()
		Elf32_Ehdr *header = (Elf32_Ehdr *)thread_start[i]
		for unsigned j = 0; j < SELFMAG; ++j:
			if header->e_ident[j] != ELFMAG[j]:
				panic (i * 0x1000 + j, "invalid ELF magic")
		if header->e_ident[EI_CLASS] != ELFCLASS32:
			panic (i * 0x1000 + EI_CLASS, "invalid ELF class")
		if header->e_ident[EI_DATA] != ELFDATA2LSB:
			panic (i * 0x1000 + EI_DATA, "invalid ELF data")
		if header->e_ident[EI_VERSION] != EV_CURRENT:
			panic (i * 0x1000 + EI_VERSION, "invalid ELF version")
		if header->e_type != ET_EXEC:
			panic (i * 0x1000 + 0x10, "invalid ELF type")
		if header->e_machine != EM_MIPS_RS3_LE && header->e_machine != EM_MIPS:
			panic (i * 0x1000 + 0x12, "invalid ELF machine")
		thread->pc = header->e_entry
		thread->sp = 0x80000000
		for unsigned section = 0; section < header->e_shnum; ++section:
			Elf32_Shdr *shdr = (Elf32_Shdr *)(thread_start[i] + header->e_shoff + section * header->e_shentsize)
			if ~shdr->sh_flags & SHF_ALLOC:
				continue
			bool writable = shdr->sh_flags & SHF_WRITE
			//bool executable = shdr->sh_flags & SHF_EXECINSTR
			if shdr->sh_type != SHT_NOBITS:
				unsigned file_offset = shdr->sh_offset >> PAGE_BITS
				if ((file_offset + shdr->sh_size) >> PAGE_BITS) >= (PAGE_SIZE >> 2):
					panic (0x87446809, "initial thread too large")
				for unsigned p = (shdr->sh_addr & PAGE_MASK); p < shdr->sh_addr + shdr->sh_size; p += PAGE_SIZE:
					unsigned section_offset = (p - (shdr->sh_addr & PAGE_MASK)) >> PAGE_BITS
					unsigned idx = file_offset + section_offset
					if !pages[idx]:
						pages[idx] = mem->alloc_page ()
						pages[idx]->data.frame = thread_start[i] + (idx << PAGE_BITS)
						pages[idx]->data.flags = PAGE_FLAG_WRITABLE | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
						++top_memory.limit
						mem->use ()
						if !mem->map (pages[idx], p, writable):
							panic (0x22446688, "unable to map initial page")
			else:
				if !writable:
					panic (0x33399993, "unwritable bss section")
				for unsigned p = (shdr->sh_addr & PAGE_MASK); p < shdr->sh_addr + shdr->sh_size; p += PAGE_SIZE:
					Page *page = mem->get_mapping (p, &writable)
					if !page:
						page = mem->alloc_page ()
						if !page:
							panic (0x00220022, "out of memory")
						page->data.frame = mem->zalloc ()
						if !page->data.frame:
							panic (0x02220022, "out of memory")
						page->data.flags = PAGE_FLAG_WRITABLE | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
						if !mem->map (page, p, true):
							panic (0x33557799, "unable to map initial bss page")
					else:
						if !writable:
							panic (0x20203030, "bss section starts on read-only page")
						for unsigned a = p; a < ((p + PAGE_SIZE) & PAGE_MASK); a += 4:
							if a >= shdr->sh_addr + shdr->sh_size:
								break
							if a < shdr->sh_addr:
								continue
							((unsigned *)page->data.frame)[(a & ~PAGE_MASK) >> 2] = 0
		for unsigned p = 0; p <= ((thread_start[i + 1] - thread_start[i] - 1) >> PAGE_BITS); ++p:
			if pages[p]:
				continue
			++top_memory.limit
			top_memory.pfree (thread_start[i] + (p << PAGE_BITS))
		Page *stackpage = mem->alloc_page ()
		if !stackpage:
			panic (0x13151719, "unable to allocate initial stack page")
		stackpage->data.frame = mem->zalloc ()
		stackpage->data.flags = PAGE_FLAG_WRITABLE | PAGE_FLAG_PAYING | PAGE_FLAG_FRAME
		if !mem->map (stackpage, 0x7ffff000, true):
			panic (0x13151719, "unable to map initial stack page")
		Receiver *recv = mem->alloc_receiver ()
		recv->owner = thread
		thread->receivers = recv
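		// Hand the fresh thread capabilities to its own receiver, thread and memory
		// objects; on mips these slots end up in the argument registers a0-a3, so the
		// initial program can bootstrap itself from them (a3 is call-only on the receiver).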
		thread->arch.a0 = mkcap (mem, CAPTYPE_RECEIVER | CAP_RECEIVER_ALL_RIGHTS, recv)
		thread->arch.a1 = mkcap (mem, CAPTYPE_THREAD | CAP_THREAD_ALL_PRIV_RIGHTS, thread)
		thread->arch.a2 = mkcap (mem, CAPTYPE_MEMORY | CAP_MEMORY_ALL_RIGHTS, mem)
		thread->arch.a3 = mkcap (mem, CAPTYPE_RECEIVER | (1 << CAP_RECEIVER_CALL), recv)
		thread->flags = THREAD_FLAG_RUNNING | THREAD_FLAG_PRIV
		if !i:
			first_scheduled = thread
			init_receiver = recv
		else:
			thread->arch.t0 = mkcap (mem, (unsigned)init_receiver, (void *)i)
			previous->schedule_next = thread
			thread->schedule_prev = previous
		previous = thread
		mem->pfree ((unsigned)pages)
		thread->schedule_next = NULL

// Initialize the kernel, finish by falling into the idle task.
void init (unsigned mem):
	dbg_code = 0
	// Disable interrupts and set interrupt vectors to normal.
	cp0_set0 (CP0_STATUS)
	// Initialize kernel variables to empty.
	unsigned count = init_memory (mem)
	// initialize system control coprocessor.
	init_cp0 ()
	// initialize everything about the idle task.
	init_idle ()
	// initialize top_memory.
	top_memory.prev = NULL
	top_memory.next = NULL
	top_memory.address_space = NULL
	top_memory.refs = NULL
	top_memory.pages = NULL
	top_memory.threads = NULL
	top_memory.memories = NULL
	top_memory.limit = count
	top_memory.used = 0
	top_memory.arch.directory = NULL
	top_memory.arch.asid = 0
	// Record all asids as unused.
	for unsigned i = 0; i < 63; ++i:
		asids[i] = i + 1
	asids[63] = 0
	// Set up initial threads.
	init_threads ()
	// Disable all gpio interrupts and alternate functions initially.
	for unsigned i = 0; i < 4; ++i:
		GPIO_GPIER (i) = 0
		GPIO_GPALR (i) = 0
		GPIO_GPAUR (i) = 0
	// Set up the rest of the hardware (copied from Linux).
	cpm_idle_mode ()
	cpm_enable_cko1 ()
	cpm_start_all ()
	harb_set_priority (0x08)
	dmac_enable_all_channels ()
	harb_usb0_uhc ()
	gpio_as_emc ()
	gpio_as_uart0 ()
	gpio_as_dma ()
	gpio_as_eth ()
	gpio_as_usb ()
	gpio_as_lcd_master ()
	gpio_as_ssi ()
	gpio_as_msc ()
	// Start the operating system timer, and set it to give an interrupt immediately.
	// This is better, because the kernel starts with jumping into the idle task and
	// waiting for the first interrupt.
	unsigned latch = (JZ_EXTAL + (HZ >> 1)) / HZ
	ost_disable_all ()
	ost_set_mode (0, OST_TCSR_UIE | OST_TCSR_CKS_EXTAL)
	ost_set_reload (0, latch)
	ost_set_count (0, 1)
	ost_set_mode (0, OST_TCSR_UIE | OST_TCSR_CKS_EXTAL)
	ost_enable_channel (0)
	intc_unmask_irq (IRQ_OST0)
	// Unset all interrupt handlers.
	for unsigned i = 0; i < 32; ++i:
		arch_interrupt_receiver[i] = NULL
	// Say we're handling an exception.  Since we're going to enter the idle task, allow access to cp0.
	// All interrupts enter the CPU through the interrupt controller at IP2, so enable that.
	cp0_set (CP0_STATUS, 0x1000ff13)
	// Done; return to user space (the idle task).
	__asm__ volatile ("eret")
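
// Note on the final status value, assuming the standard mips32 Status layout:
// 0x1000ff13 sets CU0 (bit 28, so the idle task may use cp0), IM7..IM0 (bits 15..8,
// unmasking all interrupt lines including the controller's IP2), UM (bit 4, user
// mode), EXL (bit 1) and IE (bit 0).  With EXL set, the eret above clears it and
// jumps to the EPC prepared in init_cp0, entering the idle task in user mode with
// interrupts enabled.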