mirror of git://projects.qi-hardware.com/iris.git synced 2024-11-04 23:19:41 +02:00

start with allocation

Bas Wijnen 2009-05-18 09:30:27 +02:00
parent 5d9d12b860
commit e56cf6bb64
5 changed files with 293 additions and 75 deletions

Makefile

@@ -8,7 +8,7 @@ LD = $(CROSS)ld
OBJCOPY = $(CROSS)objcopy
OBJDUMP = $(CROSS)objdump
kernel_sources = interrupts.cc panic.cc data.cc test.cc
kernel_sources = interrupts.cc panic.cc data.cc test.cc alloc.cc
boot_sources = init.cc
BUILT_SOURCES = $(kernel_sources) $(boot_sources)
@@ -22,7 +22,10 @@ PYPP = /usr/bin/pypp
uimage: all.raw Makefile
	mkimage -A MIPS -O Linux -C none -a $(load) -e 0x$(shell /bin/sh -c '$(OBJDUMP) -t all | grep __start$$ | cut -b-8') -n "Shevek's kernel" -d $< $@ | sed -e 's/:/;/g'
%.o:%.cc Makefile kernel.hh
arch.hh: mips.hh
	ln -s $< $@
%.o:%.cc Makefile kernel.hh arch.hh
	$(CC) $(CPPFLAGS) $(CXXFLAGS) -c $< -o $@
%.o:%.S Makefile
@@ -40,6 +43,6 @@ junk = mdebug.abi32 reginfo comment pdr
	gzip < $< > $@
clean:
	rm -f all uimage *.o all.raw.gz
	rm -f all uimage *.o all.raw.gz arch.hh
.PHONY: clean

alloc.ccp (new file, 142 lines)

@@ -0,0 +1,142 @@
#include "kernel.hh"

void *Memory::palloc ():
	if used >= limit:
		return NULL
	FreePage *ret = junk_pages
	if !ret:
		ret = zero_pages
		zero_pages = ret->next
	else:
		junk_pages = ret->next
	return ret

void *Memory::zalloc ():
	if used >= limit:
		return NULL
	FreePage *ret = zero_pages
	if !ret:
		ret = junk_pages
		for unsigned i = 1; i < PAGE_SIZE; ++i:
			((unsigned *)ret)[i] = 0
		junk_pages = ret->next
	else:
		zero_pages = ret->next
	ret->next = NULL
	return ret

void Memory::pfree (void *page):
	FreePage *p = (FreePage *)page
	p->next = junk_pages
	junk_pages = p

void Memory::zfree (void *page):
	FreePage *p = (FreePage *)page
	p->next = zero_pages
	zero_pages = p

void *Memory::search_free (unsigned size, void *&first):
	Free *f
	unsigned s = 0
	for f = frees; f; f = f->next:
		if f->next_obj:
			s = f->next_obj - f
		else:
			s = PAGE_SIZE - ((unsigned)f & ~PAGE_MASK)
		if s >= size:
			break
	if !f:
		f = (Free *)palloc ()
		if !f:
			return NULL
		f->marker = ~0
		f->next = frees
		f->prev = NULL
		frees = f
		if f->next:
			f->next->prev = f
		f->next_obj = NULL
		f->prev_obj = NULL
		s = PAGE_SIZE
	// We have a free block, possibly too large.
	if s >= size + sizeof (Free):
		// Create the new object at the end and keep the Free.
		Free *obj = (Free *)((unsigned)f + s - size)
		obj->next_obj = f->next_obj
		if obj->next_obj:
			obj->next_obj->prev_obj = obj
		obj->prev_obj = f
		f->next_obj = obj
		f = obj
	else:
		if f->prev:
			f->prev->next = f->next
		else:
			frees = f->next
		if f->next:
			f->next->prev = f->prev
	f->next = first
	f->prev = NULL
	if f->next:
		f->next->prev = f
	first = f
	return f

void Object_base::free_obj (Memory *parent):
	Free *self
	// Merge with previous, if it exists and is a Free.
	if prev_obj && prev_obj->is_free ():
		self = (Free *)prev_obj
		self->next_obj = next_obj
		if next_obj:
			next_obj->prev_obj = self
	else:
		self = (Free *)this
		self->next = parent->frees
		self->prev = NULL
		if self->next:
			self->next->prev = self
		parent->frees = self
		self->marker = ~0
	// Merge with next, if it exists and is a Free.
	if self->next_obj && self->next_obj->is_free ():
		self->next_obj = self->next_obj->next_obj
		if self->next_obj:
			self->next_obj->prev_obj = self
	// Free page if the resulting object is the only thing in it.
	if !self->prev_obj && !self->next_obj:
		if self->next:
			self->next->prev = self->prev
		if self->prev:
			self->prev->next = self->next
		else:
			parent->frees = self->next
		parent->pfree (self)

Page *Memory::alloc_page ():
	Page *ret = (Page *)search_free (sizeof (Page), pages)
	ret->physical = zalloc ()
	if !ret->physical:
		ret->free (this, pages)
	return ret

Thread *Memory::alloc_thread ():
	Thread *ret = (Thread *)search_free (sizeof (Thread), threads)
	ret->address_space = this
	ret->pc = 0
	ret->sp = 0
	Thread_arch_init (ret)
	ret->schedule_prev = NULL
	ret->schedule_next = NULL
	return ret

Memory *Memory::alloc_memory ():
	Memory *ret = (Memory *)search_free (sizeof (Memory), memories)
	ret->frees = NULL
	ret->pages = NULL
	ret->threads = NULL
	ret->memories = NULL
	ret->limit = ~0
	ret->used = 0
	Memory_arch_init (ret)
	return ret

init.ccp

@@ -1,6 +1,26 @@
#pypp 0
// Also declare things which only work during kernel init.
#define INIT
#include "kernel.hh"
#define cp0_get(reg, sel, target) do { __asm__ volatile ("mfc0 %0, $" #reg ", " #sel : "=r" (target)); } while (0)
#define cp0_set(reg, value) do { __asm__ volatile ("mtc0 %0, $" #reg :: "r" (value)); } while (0)
#define cp0_set0(reg) do { __asm__ volatile ("mtc0 $zero, $" #reg); } while (0)
// cp0 registers.
#define INDEX 0
#define ENTRY_LO0 2
#define ENTRY_LO1 3
#define PAGE_MASK 5
#define WIRED 6
#define COUNT 9
#define ENTRY_HI 10
#define COMPARE 11
#define STATUS 12
#define CAUSE 13
#define EPC 14
#define CONFIG 16
static void init_idle ():
	// initialize idle task as if it is currently running.
	idle.size = sizeof (Thread)
@@ -34,58 +54,57 @@ static void init_idle ():
	idle_page.physical = 0

static void init_cp0 ():
	// Set timer to a defined value (11 is Compare)
	__asm__ volatile ("mtc0 %0, $11, 0" :: "r"(1000000))
	// Reset timer (9 is Count)
	__asm__ volatile ("mtc0 $zero, $9, 0")
	// Use the interrupt vector for interrupts (13 is Cause)
	__asm__ volatile ("mtc0 %0, $13" :: "r"(1 << 23))
	// Set timer to a defined value
	cp0_set (COMPARE, 1000000)
	// Reset timer
	cp0_set0 (COUNT)
	// Use the interrupt vector for interrupts
	cp0_set (CAUSE, 1 << 23)
	// clear the tlb, hardwire page 0 to 0xffffffff
	// and soft-wire it to (0x294 << 20) + (0x290 << 10)
	// (for the idle task).
	// 6 is Wired.
	__asm__ volatile ("mtc0 %0, $6" :: "r"(1))
	// 5 is PageMask.
	__asm__ volatile ("mtc0 $zero, $5")
	// 2 is EntryLo0.
	__asm__ volatile ("mtc0 $zero, $2")
	// 3 is EntryLo1.
	__asm__ volatile ("mtc0 $zero, $3")
	cp0_set (WIRED, 1)
	cp0_set0 (PAGE_MASK)
	cp0_set0 (ENTRY_LO0)
	cp0_set0 (ENTRY_LO1)
	// Get number of tlb entries (is 31).
	unsigned num;
	__asm__ volatile ("mfc0 %0, $16, 1" : "=r"(num))
	cp0_get (CONFIG, 1, num)
	num >>= 25
	num &= 0x3f
	// Clear the tlb.
#if 0
	for unsigned i = 1; i < num; ++i:
		// this address doesn't reach the tlb, so it can't trigger exceptions.
		// 10 is EntryHi
		__asm__ volatile ("mtc0 %0, $10" :: "r"(0x70000000 + 0x1000 * i))
		// 0 is Index.
		__asm__ volatile ("mtc0 %0, $0" :: "r"(i))
		cp0_set (ENTRY_HI, 0x70000000 + 0x1000 * i)
		cp0_set (INDEX, i)
		// write the data.
		__asm__ volatile ("tlbwi")
#endif
	// Fill the upper page in kseg3.
	__asm__ volatile ("mtc0 %0, $10" :: "r"(0xfffff000))
	__asm__ volatile ("mtc0 %0, $2" :: "r"(0x0000001d))
	__asm__ volatile ("mtc0 %0, $3" :: "r"(0x0000001f))
	__asm__ volatile ("mtc0 %0, $0" :: "r"(0))
	cp0_set (ENTRY_HI, 0xfffff000)
	cp0_set (ENTRY_LO0, 0x1d)
	cp0_set (ENTRY_LO1, 0x1f)
	cp0_set0 (INDEX)
	__asm__ volatile ("tlbwi")
	// Fill the idle task's page in useg. Set it to non-cachable.
	__asm__ volatile ("mtc0 %0, $10" :: "r"(0x284a0000))
	__asm__ volatile ("mtc0 %0, $2" :: "r"(0x00000016))
	__asm__ volatile ("mtc0 %0, $3" :: "r"(0x00000014))
	cp0_set (ENTRY_HI, 0x284a0000)
	cp0_set (ENTRY_LO0, 0x16)
	cp0_set (ENTRY_LO1, 0x14)
	__asm__ volatile ("tlbwr")
	// Allow eret to be used to jump to the idle task.
	// 14 is EPC.
	__asm__ volatile ("mtc0 %0, $14" :: "r"(0x284a0288))
	cp0_set (EPC, 0x284a0288)
	// Enable all interrupts and say we're handling an exception.
	// 12 is Status.
	__asm__ volatile ("mtc0 %0, $12" :: "r"(0x1000ff13))
	// Since we're going to enter the idle task, allow access to cp0.
	cp0_set (STATUS, 0x1000ff13)

static void init_threads ():
	for unsigned i = 0; i < NUM_THREADS; ++i:
		Memory *mem = top_memory.alloc_memory ()
		Thread *thread = mem->alloc_thread ()
		// TODO

/// Initialize the kernel, finish by falling into the idle task.
extern unsigned _end
@@ -119,7 +138,8 @@ void init ():
	top_memory.used = 0
	top_memory.cpu.directory = NULL
	top_memory.cpu.asid = 0
	// TOOO: set up initial threads.
	init_threads ()
	// Done; return to user space (the idle task).
	__asm__ volatile ("eret")

kernel.hhp

@@ -2,66 +2,75 @@
#ifndef _KERNEL_HH
#define _KERNEL_HH
#ifndef EXTERN
#define EXTERN extern
#endif
#define NULL 0
class Object
class Page
class Thread
class Memory
struct Object_base
struct Object
struct Page
struct Thread
struct Memory
struct Object:
	unsigned size
	Object *prev, *next
#include "arch.hh"
struct Page : public Object:
	Page *page_prev, *page_next
	unsigned physical
struct Object_base:
	// Next and previous object of any type in the same page.
	Object *prev_obj, *next_obj
	void free_obj (Memory *parent)
	inline bool is_free ()
struct Thread : public Object:
	struct Cpu:
		unsigned at, v0, v1, a0, a1, a2, a3
		unsigned t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
		unsigned gp, sp, fp, ra, hi, lo, k0, k1, pc
	Cpu cpu
	Thread *thread_prev, *thread_next
	Thread *schedule_prev, *schedule_next
template <typename _T> struct Object : public Object_base:
	// Next and previous object of the same type in any page.
	_T *prev, *next
	void free (Memory *parent, _T *&first)
struct Free : public Object <Free>:
	// This marker is ~0. No other kernel structure may allow this value
	// at this point. It is used to recognize free chunks.
	unsigned marker
bool Object_base::is_free ():
	return ((Free *)this)->marker == ~0
struct Page : public Object <Page>:
	void *physical
struct Thread : public Object <Thread>:
	Memory *address_space
	unsigned pc, sp
	Thread_arch arch
	Thread *schedule_prev, *schedule_next
struct Memory : public Object:
	Memory *memory_prev, *memory_next
struct Memory : public Object <Memory>:
	Free *frees
	Page *pages
	Thread *threads
	Memory *memories
	unsigned limit, used
	struct Cpu:
		unsigned asid
		Page ***directory
	Cpu cpu
	Memory_arch arch
	// Allocation of pages.
	void *palloc ()
	void *zalloc ()
	void pfree (void *page)
	void zfree (void *page)
	// Allocation routines for kernel structures
	void *search_free (unsigned size, void *&first)
	Page *alloc_page ()
	Thread *alloc_thread ()
	Memory *alloc_memory ()
// Functions which can be called from assembly must not be mangled.
extern "C":
	// Kernel entry points, called from entry.S.
	void init ()
	Thread *interrupt (Thread *current)
	Thread *cache_error (Thread *current)
	Thread *exception (Thread *current)
	// tlb stuff. tlb_refill is also an entry point.
	void tlb_setup ()
	Thread *tlb_refill (Thread *current, unsigned EntryHi)
	// Start running the idle task for the first time.
	void run_idle (Thread *self)
// Panic. n is sent over caps led. message is currently ignored.
void panic (unsigned n, char const *message)
// Debug: switch caps led
void led (bool on, bool tick)
#ifndef EXTERN
#define EXTERN extern
#endif
struct FreePage:
	FreePage *next
@@ -72,4 +81,13 @@ EXTERN Thread idle
EXTERN Memory idle_memory
EXTERN Page idle_page
template <typename _T> void Object <_T>::free (Memory *parent, _T *&first):
	if prev:
		prev->next = next
	else:
		first = next
	if next:
		next->prev = prev
	free_obj (parent)
#endif
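
The Free marker trick above relies on every object type reserving the word that overlays marker. A hedged, host-side model of that layout (not from this commit; the type names here are invented for the example) shows how is_free distinguishes a free chunk from a live object in the same page:

#include <cassert>

struct ObjBase { ObjBase *prev_obj, *next_obj; };               // per-page neighbour links
template <typename T> struct Obj : ObjBase { T *prev, *next; }; // per-type list links
struct FreeChunk : Obj<FreeChunk> { unsigned marker; };         // marker == ~0u means "free"
struct ThreadLike : Obj<ThreadLike> { unsigned pc, sp; };       // pc overlays marker, must never be ~0u

static bool is_free (ObjBase *o)
{
	// Safe only because no live object may store ~0u in the word that overlays marker.
	return ((FreeChunk *) o)->marker == ~0u;
}

int main ()
{
	FreeChunk f = {}; f.marker = ~0u;
	ThreadLike t = {}; t.pc = 0; t.sp = 0;
	assert (is_free (&f));
	assert (!is_free (&t));
	return 0;
}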

mips.hhp (new file, 35 lines)

@@ -0,0 +1,35 @@
#pypp 0
#ifndef _ARCH_HH
#define _ARCH_HH
struct Thread_arch:
	unsigned at, v0, v1, a0, a1, a2, a3
	unsigned t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
	unsigned gp, fp, ra, hi, lo, k0, k1
#define Thread_arch_init(thread) do { thread->at = 0; thread->v0 = 0; thread->v1 = 0; thread->a0 = 0; thread->a1 = 0; thread->a2 = 0; thread->a3 = 0; thread->t0 = 0; thread->t1 = 0; thread->t2 = 0; thread->t3 = 0; thread->t4 = 0; thread->t5 = 0; thread->t6 = 0; thread->t7 = 0; thread->t8 = 0; thread->t9 = 0; thread->gp = 0; thread->fp = 0; thread->ra = 0; thread->hi = 0; thread->lo = 0; thread->k0 = 0; thread->k1 = 0; } while (0)
struct Memory_arch:
	unsigned asid
	Page ***directory
EXTERN unsigned g_asid
#define Memory_arch_init(mem) do { mem->asid = g_asid++; g_asid &= 0x3f; mem->directory = NULL; } while (0)
// Functions which can be called from assembly must not be mangled.
extern "C":
	// Kernel entry points, called from entry.S.
	Thread *interrupt (Thread *current)
	Thread *cache_error (Thread *current)
	Thread *exception (Thread *current)
	Thread *tlb_refill (Thread *current, unsigned EntryHi)
#ifdef INIT
	// Initialize most things (the rest is done in boot.S)
	void init ()
	// Start running the idle task for the first time.
	void run_idle (Thread *self)
#endif
#endif
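
Memory_arch ends with a Page ***directory that nothing in this commit fills in yet. Purely as a hedged illustration of what a two-level directory of that shape could hold, here is a host-side sketch; the 10/10/12 address split, the names and the NULL conventions are assumptions for the example, not something the commit defines.

#include <cstdio>

struct Page { unsigned physical; };

// Walk a two-level directory: the top 10 bits pick a second-level table,
// the next 10 bits pick the entry for a 4 KiB page.
static Page *lookup (Page ***directory, unsigned va)
{
	Page **table = directory ? directory[va >> 22] : nullptr;
	if (!table)
		return nullptr;                  // region has no second-level table yet
	return table[(va >> 12) & 0x3ff];    // may be nullptr if the page is unmapped
}

int main ()
{
	static Page *level2[1024];
	static Page **level1[1024];
	static Page page = { 0x00400000 };

	level1[0x284a0000u >> 22] = level2;
	level2[(0x284a0000u >> 12) & 0x3ff] = &page;

	Page *p = lookup (level1, 0x284a0288);   // same page the idle task's pc uses above
	std::printf ("physical 0x%08x\n", p ? p->physical : 0);
	return 0;
}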