// The kernel stack.
	.lcomm	kernel_stack, KERNEL_STACK_SIZE

	.globl	run_idle
	.globl	directory

	.set	noat
	.set	noreorder

#define ARCH
#define ASM
#include "arch.hh"

addr_000:
#if 0
	// TLB refill
	bne	$zero, $k1, slow_refill
	nop
	bne	$zero, $k0, slow_refill
	lw	$k1, -0xd94($zero)
	mfc0	$k0, $CP0_ENTRY_HI
	srl	$k0, $k0, 19
	and	$k0, $k0, 0x3fc
	addu	$k0, $k0, $k1
	beq	$zero, $k0, zero_refill
	lw	$k0, 0($k0)
	mfc0	$k1, $CP0_ENTRY_HI
	srl	$k1, $k1, 10
	and	$k1, $k1, 0x1f8
	add	$k0, $k0, $k1
	lw	$k1, 0($k0)
	mtc0	$k1, $CP0_ENTRY_LO0
	lw	$k1, 4($k0)
	mtc0	$k1, $CP0_ENTRY_LO1
1:	tlbwr
	move	$zero, $k0
	move	$zero, $k1
	eret

zero_refill:
	mtc0	$zero, $CP0_ENTRY_LO0
	b	1b
	mtc0	$zero, $CP0_ENTRY_LO1

slow_refill:
	move	$k1, $zero
#endif
	sw	$ra, -0xd88($zero)
	bal	save_regs
	nop
	la	$t9, tlb_refill
	jr	$t9
	nop

	.fill	0x100 - (. - addr_000)
addr_100:
	// Cache error
	sw	$ra, -0xd88($zero)
	bal	save_regs
	nop
	la	$t9, cache_error
	jr	$t9
	nop

	.fill	0x180 - (. - addr_000)
addr_180:
	// General exception
	// Allow new exceptions to update EPC and friends.
	//mtc0	$zero, $CP0_STATUS
	sw	$ra, -0xd88($zero)
	bal	save_regs
	nop
	la	$t9, exception
	jr	$t9
	nop

	.fill	0x200 - (. - addr_000) - 8
	.word	0x0000001e	// 1f8 EntryLo data for idle page.
	.word	0x80000000	// 1fc A pointer to the current page.
addr_200:
	// Interrupt
	sw	$ra, -0xd88($zero)
	bal	save_regs
	nop
	la	$t9, interrupt
	jr	$t9
	nop

	.fill	0x280 - (. - addr_000) - 20
directory:
	.word	0		// -d94 == directory
	// space for save_regs
	.word	0		// -d90 == k0
	.word	idle		// -d8c == current
	.word	0		// -d88 == ra
	.word	_gp		// -d84 == gp

start_idle:			// 280
	// Wait for the next interrupt, then the first thread will be scheduled.
	// It is impractical to try to call schedule, because for that the
	// idle task would need to own capabilities.
	move	$v0, $zero
	syscall
	nop
1:	wait
	b	1b
	nop

	// TODO: save only fragile registers now, the rest on task switch.
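	// Handler return convention (sketch, derived from the code below):
	// save_regs spills the interrupted thread's registers into the save
	// area pointed to by the "current" slot (-0xd8c($zero)), then jumps
	// to the handler (tlb_refill, cache_error, exception or interrupt)
	// with $ra preset to kernel_exit.  The handler therefore returns
	// straight into kernel_exit, and whatever it leaves in $v0 is taken
	// as the save area of the thread to resume: kernel_exit restores
	// every register from SAVE_*($v0), stores $v0 back into the
	// "current" slot and erets.  A possible C++-side prototype (the
	// Thread type name is an assumption, not taken from this file):
	//
	//	extern "C" Thread *interrupt();	// returns the thread to resume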
kernel_exit:
	lw	$k0, SAVE_PC($v0)
	mtc0	$k0, $CP0_EPC
	lw	$k0, SAVE_LO($v0)
	lw	$k1, SAVE_HI($v0)
	mtlo	$k0
	mthi	$k1
	lw	$v1, SAVE_V1($v0)
	lw	$a0, SAVE_A0($v0)
	lw	$a1, SAVE_A1($v0)
	lw	$a2, SAVE_A2($v0)
	lw	$a3, SAVE_A3($v0)
	lw	$t0, SAVE_T0($v0)
	lw	$t1, SAVE_T1($v0)
	lw	$t2, SAVE_T2($v0)
	lw	$t3, SAVE_T3($v0)
	lw	$t4, SAVE_T4($v0)
	lw	$t5, SAVE_T5($v0)
	lw	$t6, SAVE_T6($v0)
	lw	$t7, SAVE_T7($v0)
	lw	$t8, SAVE_T8($v0)
	lw	$t9, SAVE_T9($v0)
	lw	$s0, SAVE_S0($v0)
	lw	$s1, SAVE_S1($v0)
	lw	$s2, SAVE_S2($v0)
	lw	$s3, SAVE_S3($v0)
	lw	$s4, SAVE_S4($v0)
	lw	$s5, SAVE_S5($v0)
	lw	$s6, SAVE_S6($v0)
	lw	$s7, SAVE_S7($v0)
	lw	$fp, SAVE_FP($v0)
	lw	$ra, SAVE_RA($v0)
	lw	$at, SAVE_AT($v0)
	lw	$k0, SAVE_K0($v0)
	lw	$k1, SAVE_V0($v0)
	sw	$k1, -0xd90($zero)
	lw	$k1, SAVE_K1($v0)
	sw	$v0, -0xd8c($zero)
	lw	$sp, SAVE_SP($v0)
	lw	$gp, SAVE_GP($v0)
	lw	$v0, -0xd90($zero)
	eret

save_regs:
	sw	$k0, -0xd90($zero)
	lw	$k0, -0xd8c($zero)
	sw	$at, SAVE_AT($k0)
	sw	$gp, SAVE_GP($k0)
	sw	$sp, SAVE_SP($k0)
	sw	$fp, SAVE_FP($k0)
	sw	$k1, SAVE_K1($k0)
	lw	$k1, -0xd90($zero)
	sw	$k1, SAVE_K0($k0)
	lw	$k1, -0xd88($zero)
	sw	$k1, SAVE_RA($k0)
	sw	$v0, SAVE_V0($k0)
	sw	$v1, SAVE_V1($k0)
	sw	$a0, SAVE_A0($k0)
	sw	$a1, SAVE_A1($k0)
	sw	$a2, SAVE_A2($k0)
	sw	$a3, SAVE_A3($k0)
	sw	$t0, SAVE_T0($k0)
	sw	$t1, SAVE_T1($k0)
	sw	$t2, SAVE_T2($k0)
	sw	$t3, SAVE_T3($k0)
	sw	$t4, SAVE_T4($k0)
	sw	$t5, SAVE_T5($k0)
	sw	$t6, SAVE_T6($k0)
	sw	$t7, SAVE_T7($k0)
	sw	$t8, SAVE_T8($k0)
	sw	$t9, SAVE_T9($k0)
	sw	$s0, SAVE_S0($k0)
	sw	$s1, SAVE_S1($k0)
	sw	$s2, SAVE_S2($k0)
	sw	$s3, SAVE_S3($k0)
	sw	$s4, SAVE_S4($k0)
	sw	$s5, SAVE_S5($k0)
	sw	$s6, SAVE_S6($k0)
	sw	$s7, SAVE_S7($k0)
	mfhi	$v0
	mflo	$v1
	sw	$v0, SAVE_HI($k0)
	sw	$v1, SAVE_LO($k0)
	mfc0	$k1, $CP0_EPC
	sw	$k1, SAVE_PC($k0)
	lw	$gp, -0xd84($zero)
	la	$sp, kernel_stack + KERNEL_STACK_SIZE
	move	$t9, $ra
	la	$ra, kernel_exit
	jr	$t9
	nop

	.globl	thread0
	.globl	thread1
	.globl	thread2

	.balign	0x1000
thread0:
	.incbin	"thread0"
	.balign	0x1000
thread1:
	.incbin	"thread1"
thread2:
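	// The embedded thread images are page-aligned (.balign 0x1000),
	// presumably so whole pages of them can be mapped into user address
	// spaces.  A minimal way to reach them from C++, assuming nothing
	// beyond the .globl labels above (the size arithmetic relies on the
	// following label bracketing each image, so thread0's size includes
	// its alignment padding):
	//
	//	extern "C" char thread0[], thread1[], thread2[];
	//	const size_t thread0_size = thread1 - thread0;
	//	const size_t thread1_size = thread2 - thread1;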