Mirror of git://projects.qi-hardware.com/iris.git, synced 2025-04-21 12:27:27 +03:00

Commit: more
alloc.ccp | 93
@@ -1,14 +1,17 @@
 #pypp 0
 #include "kernel.hh"
 
-// TODO: avoid recursion.
 bool Memory::use ():
+    // Go up to parents, incrementing used.
+    Memory *m
+    for m = this; m; m = m->parent:
         if used >= limit:
+            // Not allowed.  Restore used for all children.
+            for Memory *r = this; r != m; r = r->parent:
+                --r->used
             return false
-    if !parent || parent->use ():
-        ++used
+        ++m->used
     return true
-    return false
 
 void Memory::unuse ():
     --used
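The rewritten Memory::use above drops the recursive quota check in favour of an iterative walk up the parent chain that rolls back any increments already made when an ancestor is at its limit. A minimal plain-C++ sketch of that walk-and-roll-back pattern, assuming a Memory-like struct with parent, used and limit fields (it checks each node against its own limit, a slight generalisation of the hunk's unqualified `used >= limit` test):

// Sketch only: the iterative "increment up the parent chain, roll back on
// failure" pattern of the new Memory::use; not the kernel's exact code.
struct Memory
{
    Memory *parent;
    unsigned used, limit;

    bool use ()
    {
        // Go up to parents, incrementing used.
        for (Memory *m = this; m; m = m->parent)
        {
            if (m->used >= m->limit)
            {
                // Over the limit: undo the increments already made.
                for (Memory *r = this; r != m; r = r->parent)
                    --r->used;
                return false;
            }
            ++m->used;
        }
        return true;
    }
};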
@@ -129,21 +132,28 @@ void Object_base::free_obj (Memory *parent):
 
 Page *Memory::alloc_page ():
     Page *ret = (Page *)search_free (sizeof (Page), (void **)&pages)
+    if !ret:
+        return NULL
     ret->physical = 0
     return ret
 
 Thread *Memory::alloc_thread ():
     Thread *ret = (Thread *)search_free (sizeof (Thread), (void **)&threads)
+    if !ret:
+        return NULL
     ret->address_space = this
     ret->pc = 0
     ret->sp = 0
     Thread_arch_init (ret)
     ret->schedule_prev = NULL
     ret->schedule_next = NULL
+    ret->receivers = NULL
     return ret
 
 Message *Memory::alloc_message (Capability *source):
     Message *ret = (Message *)search_free (sizeof (Message), (void **)&source->target->messages)
+    if !ret:
+        return NULL
     for unsigned i = 0; i < 4; ++i:
         ret->capabilities[i] = NULL
         ret->data[i] = 0
@@ -152,6 +162,8 @@ Message *Memory::alloc_message (Capability *source):
 
 Receiver *Memory::alloc_receiver ():
     Receiver *ret = (Receiver *)search_free (sizeof (Receiver), (void **)&receivers)
+    if !ret:
+        return NULL
     ret->owner = NULL
     ret->prev_owned = NULL
     ret->next_owned = NULL
@@ -161,6 +173,8 @@ Receiver *Memory::alloc_receiver ():
 
 Capability *Memory::alloc_capability (Receiver *target, Capability **parent, unsigned protected_data):
     Capability *ret = (Capability *)search_free (sizeof (Capability), (void **)&capabilities)
+    if !ret:
+        return NULL
     ret->target = target
     ret->children = NULL
     ret->sibling_prev = NULL
@@ -170,8 +184,20 @@ Capability *Memory::alloc_capability (Receiver *target, Capability **parent, unsigned protected_data):
     ret->protected_data = protected_data
     return ret
 
+Cappage *Memory::alloc_cappage ():
+    Cappage *ret = (Cappage *)search_free (sizeof (Cappage), (void **)&cappages)
+    if !ret:
+        return NULL
+    ret->page = (Capability *)zalloc ()
+    if !ret->page:
+        free_cappage (ret)
+        return NULL
+    return ret
+
 Memory *Memory::alloc_memory ():
     Memory *ret = (Memory *)search_free (sizeof (Memory), (void **)&memories)
+    if !ret:
+        return NULL
     ret->parent = this
     ret->frees = NULL
     ret->pages = NULL
@@ -190,6 +216,7 @@ void Memory::free_page (Page *page):
     if page->next:
         page->next->prev = page->prev
     unuse ()
+    if page->physical:
         pfree (page->physical)
     page->free_obj (this)
 
@@ -207,6 +234,8 @@ void Memory::free_thread (Thread *thread):
         first_scheduled = thread->schedule_next
     if thread->schedule_next:
         thread->schedule_next->schedule_prev = thread->schedule_prev
+    while thread->receivers:
+        thread->receivers->orphan ()
     thread->free_obj (this)
 
 void Memory::free_message (Message *message):
@@ -215,45 +244,39 @@ void Memory::free_message (Message *message):
     message->free_obj (this)
 
 void Memory::free_receiver (Receiver *receiver):
-    if receiver->prev_owned:
-        receiver->prev_owned->next_owned = receiver->next_owned
-    else:
-        receiver->owner->receivers = receiver->next_owned
-    if receiver->next_owned:
-        receiver->next_owned->prev_owned = receiver->prev_owned
+    receiver->orphan ()
     while receiver->capabilities:
         receiver->capabilities->invalidate ()
     while receiver->messages:
         free_message (receiver->messages)
     receiver->free_obj (this)
 
-void Memory::free_capability (Capability *capability):
-    if capability->sibling_prev:
-        capability->sibling_prev->sibling_next = capability->sibling_next
+void Receiver::orphan ():
+    if prev_owned:
+        prev_owned->next_owned = next_owned
     else:
-        capability->target->capabilities = capability->sibling_next
-    if capability->sibling_next:
-        capability->sibling_next->sibling_prev = capability->sibling_prev
-    // The sibling_prev link is used here to point to the parent.
-    // This method is used to avoid recursion.
-    capability->sibling_prev = NULL
-    Capability *c = capability
-    while c->children:
-        c->children->sibling_prev = c
-        c = c->children
-    while c:
-        Capability *next = c->sibling_next
-        if !next:
-            next = c->sibling_prev
-        if next:
-            next->sibling_prev = c->sibling_prev
-        c->free_obj (this)
-        c = next
+        owner->receivers = next_owned
+    if next_owned:
+        next_owned->prev_owned = prev_owned
+    owner = NULL
+
+void Receiver::own (Thread *o):
+    if owner:
+        orphan ()
+    owner = o
+    next_owned = o->receivers
+    if next_owned:
+        next_owned->prev_owned = this
+    o->receivers = this
+
+void Memory::free_capability (Capability *capability):
+    capability->invalidate ()
+    capability->free_obj (this)
 
 void Capability::invalidate ():
     if sibling_prev:
         sibling_prev->sibling_next = sibling_next
-    else:
+    else if target:
         target->capabilities = sibling_next
     if sibling_next:
         sibling_next->sibling_prev = sibling_prev
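The hunk above replaces the open-coded list surgery in free_receiver and the recursive teardown in free_capability with two small Receiver methods: orphan unlinks a receiver from its owning thread's list, own moves it onto a new owner's list. A hedged plain-C++ sketch of that intrusive doubly linked list, with simplified types (Thread::receivers is the list head, prev_owned and next_owned are the links):

// Sketch only: the intrusive list maintained by Receiver::own and
// Receiver::orphan above; field names follow the pypp structs.
struct Receiver;

struct Thread
{
    Receiver *receivers;    // head of the list of receivers owned by this thread
};

struct Receiver
{
    Thread *owner;
    Receiver *prev_owned, *next_owned;

    // Unlink from the owner's list; assumes the receiver currently has an owner.
    void orphan ()
    {
        if (prev_owned)
            prev_owned->next_owned = next_owned;
        else
            owner->receivers = next_owned;
        if (next_owned)
            next_owned->prev_owned = prev_owned;
        owner = nullptr;
    }

    // Link at the front of o's list, leaving any previous owner first.
    void own (Thread *o)
    {
        if (owner)
            orphan ();
        owner = o;
        prev_owned = nullptr;   // added for the stand-alone sketch; the kernel relies on it already being NULL
        next_owned = o->receivers;
        if (next_owned)
            next_owned->prev_owned = this;
        o->receivers = this;
    }
};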
@@ -277,6 +300,12 @@ void Capability::invalidate ():
         c->protected_data = 0
         c = next
 
+void Memory::free_cappage (Cappage *p):
+    for unsigned i = 0; i < CAPPAGE_SIZE; ++i:
+        p->page[i].invalidate ()
+    zfree ((unsigned)p->page)
+    p->free_obj (this)
+
 void Memory::free_memory (Memory *mem):
     if mem->prev:
         mem->prev->next = mem->next
@@ -47,6 +47,7 @@ extern "C" {
 #define CAP_CAPABILITY_GET 1
 #define CAP_CAPABILITY_SET_DEATH_NOTIFY 2
 
+#define CAPPAGE_SIZE 113
 #define CAP_CAPPAGE_SET 1
 #define CAP_CAPPAGE_GET 2
 
init.ccp | 12
@@ -118,7 +118,8 @@ static void init_threads ():
                 panic (0x22446688, "unable to map initial page")
         else:
             for unsigned p = (shdr->sh_addr & PAGE_MASK); p <= ((shdr->sh_addr + shdr->sh_size - 1) & PAGE_MASK); p += PAGE_SIZE:
-                Page *page = mem->get_mapping (p)
+                bool write = false
+                Page *page = mem->get_mapping (p, &write)
                 if !page:
                     page = mem->alloc_page ()
                     if !page:
@@ -127,6 +128,8 @@ static void init_threads ():
                     if !page->physical || !mem->map (page, p, true):
                         panic (0x33557799, "unable to map initial bss page")
                 else:
+                    if !write:
+                        panic (0x20203030, "bss section starts on read-only page")
                     for unsigned a = p; a < p + PAGE_SIZE; a += 4:
                         if a >= shdr->sh_addr + shdr->sh_size:
                             break
@@ -134,11 +137,10 @@ static void init_threads ():
                             continue
                         ((unsigned *)page->physical)[(a & ~PAGE_MASK) >> 2] = 0
     for unsigned p = 0; p <= ((thread_start[i + 1] - thread_start[i] - 1) >> PAGE_BITS); ++p:
-        // TODO: this also skips pages where new space is allocated.
         if pages[p]:
             continue
        ++top_memory.limit
-        top_memory.zfree (thread_start[i] + (p << PAGE_BITS))
+        top_memory.pfree (thread_start[i] + (p << PAGE_BITS))
     Page *stackpage = mem->alloc_page ()
     stackpage->physical = mem->zalloc ()
     if !stackpage || !mem->map (stackpage, 0x7ffff000, true):
@@ -192,6 +194,10 @@ void init ():
     top_memory.arch.directory = NULL
     top_memory.arch.asid = 0
 
+    for unsigned i = 0; i < 63; ++i:
+        asids[i] = i + 1
+    asids[63] = 0
+
     init_threads ()
 
     // Enable all interrupts and say we're handling an exception.
@@ -7,17 +7,18 @@ Thread *tlb_refill ():
     //panic (0x88776655, "TLB refill")
     unsigned EntryHi
     cp0_get (CP0_ENTRY_HI, EntryHi)
-    Page *page0 = current->address_space->get_mapping (EntryHi & ~(1 << 12))
-    Page *page1 = current->address_space->get_mapping (EntryHi | (1 << 12))
+    bool write0 = false, write1 = false
+    Page *page0 = current->address_space->get_mapping (EntryHi & ~(1 << 12), &write0)
+    Page *page1 = current->address_space->get_mapping (EntryHi | (1 << 12), &write1)
     if (!(EntryHi & (1 << 12)) && !page0) || ((EntryHi & (1 << 12)) && !page1):
         panic (0x22222222, "no page mapped at requested address")
     unsigned low0, low1
     if page0:
-        low0 = (unsigned)page0->physical | 0x18 | 0x4 | 0x2
+        low0 = ((page0->physical & ~0x80000fff) >> 6) | 0x18 | (write0 ? 0x4 : 0) | 0x2
     else
         low0 = 0
     if page1:
-        low1 = (unsigned)page1->physical | 0x18 | 0x4 | 0x2
+        low1 = ((page1->physical & ~0x80000fff) >> 6) | 0x18 | (write1 ? 0x4 : 0) | 0x2
     else
         low1 = 0
     cp0_set (CP0_ENTRY_LO0, low0)
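The low0/low1 change above stops writing the raw physical address into EntryLo and instead encodes the page frame number together with the flag bits, setting the dirty (write-enable) bit only when get_mapping reported the page as writable. A hedged sketch of that encoding in plain C++, assuming page->physical holds a kseg0 address (which is what the ~0x80000fff mask suggests) and the standard MIPS32 EntryLo layout (PFN in bits 6..29, cache attribute in bits 3..5, D in bit 2, V in bit 1):

// Sketch only: build a MIPS32 EntryLo value the way the new tlb_refill does.
// "physical" and "write" mirror the hunk; the helper name is illustrative.
static inline unsigned make_entry_lo (unsigned physical, bool write)
{
    unsigned pfn = (physical & ~0x80000fff) >> 6;   // drop kseg0 bit and page offset; PFN lands in bits 6..29
    return pfn
        | 0x18                  // C = 3: cacheable, noncoherent
        | (write ? 0x4 : 0)     // D: dirty, i.e. writable
        | 0x2;                  // V: valid
}

With such a helper the two branches above would reduce to low0 = page0 ? make_entry_lo (page0->physical, write0) : 0, and likewise for low1.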
@@ -87,7 +88,7 @@ Thread *exception ():
             panic (0x91223344, "Breakpoint.")
         case 10:
             // Reserved instruction.
-            panic (*(unsigned *)0x004000b0)
+            panic ((*(unsigned *)0x004000b0) + 1)
             panic (0xa1223344, "Reserved instruction.")
         case 11:
             // Coprocessor unusable.
kernel.hhp | 13
@@ -59,6 +59,8 @@ struct Receiver : public Object <Receiver>:
     Receiver *prev_owned, *next_owned
     Capability *capabilities
     Message *messages
+    void own (Thread *o)
+    void orphan ()
 
 struct Capability : public Object <Capability>:
     Receiver *target
@@ -68,6 +70,9 @@ struct Capability : public Object <Capability>:
     void invoke (unsigned d0, unsigned d1, unsigned d2, unsigned d3, Capability *c0, Capability *c1, Capability *c2, Capability *c3)
     void invalidate ()
 
+struct Cappage : public Object <Cappage>:
+    Capability *page
+
 struct Memory : public Object <Memory>:
     Memory *parent
     Free *frees
@@ -81,7 +86,7 @@ struct Memory : public Object <Memory>:
 
     inline bool map (Page *page, unsigned address, bool write)
     inline void unmap (Page *page, unsigned address)
-    inline Page *get_mapping (unsigned address)
+    inline Page *get_mapping (unsigned address, bool *writable)
 
     // Allocation of pages.
     bool use ()
@@ -137,7 +142,7 @@ void Memory_arch_init (Memory *mem)
 void Memory_arch_free (Memory *mem)
 bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write)
 void Memory_arch_unmap (Memory *mem, Page *page, unsigned address)
-Page *Memory_arch_get_mapping (Memory *mem, unsigned address)
+Page *Memory_arch_get_mapping (Memory *mem, unsigned address, bool *writable)
 void arch_invoke ()
 void arch_schedule (Thread *previous, Thread *target)
 
@@ -145,7 +150,7 @@ bool Memory::map (Page *page, unsigned address, bool write):
     return Memory_arch_map (this, page, address, write)
 void Memory::unmap (Page *page, unsigned address):
     Memory_arch_unmap (this, page, address)
-Page *Memory::get_mapping (unsigned address):
-    return Memory_arch_get_mapping (this, address)
+Page *Memory::get_mapping (unsigned address, bool *writable):
+    return Memory_arch_get_mapping (this, address, writable)
 
 #endif
mips.ccp | 52
@@ -29,12 +29,20 @@ void Thread_arch_init (Thread *thread):
     thread->arch.k1 = 0
 
 void Memory_arch_init (Memory *mem):
-    ++g_asid
-    if g_asid > 0x3f:
-        g_asid = 1
-    mem->arch.asid = g_asid
+    mem->arch.asid = 1
     mem->arch.directory = NULL
 
+static void flush_tlb (unsigned asid):
+    for unsigned tlb = 1; tlb < 32; ++tlb:
+        cp0_set (CP0_INDEX, tlb)
+        __asm__ volatile ("tlbr")
+        unsigned hi
+        cp0_get (CP0_ENTRY_HI, hi)
+        if (hi & 0x1f) == asid:
+            // Set asid to 0, which is only used by the idle task.
+            cp0_set (CP0_ENTRY_HI, 0x2000 * tlb)
+            __asm__ volatile ("tlbwi")
+
 void Memory_arch_free (Memory *mem):
     if !mem->arch.directory:
         return
@@ -50,6 +58,10 @@ void Memory_arch_free (Memory *mem):
             mem->unuse ()
             mem->zfree ((unsigned)table)
             mem->arch.directory[i] = NULL
+    if (Memory *)asids[mem->arch.asid] == mem:
+        flush_tlb (mem->arch.asid)
+        asids[mem->arch.asid] = asids[0]
+        asids[0] = mem->arch.asid
     mem->unuse ()
     mem->zfree ((unsigned)mem->arch.directory)
 
@@ -58,25 +70,27 @@ bool Memory_arch_map (Memory *mem, Page *page, unsigned address, bool write):
         mem->arch.directory = (unsigned **)mem->zalloc ()
         if !mem->arch.directory:
             return false
-    unsigned *table = mem->arch.directory[(unsigned)address >> 22]
+    unsigned *table = mem->arch.directory[address >> 22]
     if !table:
         table = (unsigned *)mem->zalloc ()
         if !table:
             return false
-        mem->arch.directory[(unsigned)address >> 22] = table
-    unsigned idx = ((unsigned)address >> 12) & ((1 << 10) - 1)
+        mem->arch.directory[address >> 22] = table
+    unsigned idx = (address >> 12) & ((1 << 10) - 1)
     if table[idx]:
         mem->unmap ((Page *)(table[idx] & ~3), address)
     table[idx] = write ? (unsigned)page : (unsigned)page + 1
     return true
 
 void Memory_arch_unmap (Memory *mem, Page *page, unsigned address):
-    unsigned *table = mem->arch.directory[(unsigned)address >> 22]
-    table[((unsigned)address >> 12) & ((1 << 10) - 1)] = 0
+    unsigned *table = mem->arch.directory[address >> 22]
+    table[(address >> 12) & ((1 << 10) - 1)] = 0
 
-Page *Memory_arch_get_mapping (Memory *mem, unsigned address):
-    unsigned *table = mem->arch.directory[(unsigned)address >> 22]
-    unsigned v = table[((unsigned)address >> 12) & ((1 << 10) - 1)]
+Page *Memory_arch_get_mapping (Memory *mem, unsigned address, bool *writable):
+    unsigned *table = mem->arch.directory[address >> 22]
+    unsigned v = table[(address >> 12) & ((1 << 10) - 1)]
+    if writable:
+        *writable = !(v & 1)
     return (Page *)(v & ~1)
 
 void arch_invoke ():
@@ -95,5 +109,17 @@ void arch_invoke ():
     target->invoke (current->arch.t0, current->arch.t1, current->arch.t2, current->arch.t3, c0, c1, c2, c3)
 
 void arch_schedule (Thread *previous, Thread *target):
+    if (Memory *)asids[target->address_space->arch.asid] != target->address_space:
+        if asids[0]:
+            target->address_space->arch.asid = asids[0]
+            asids[0] = asids[asids[0]]
+        else:
+            static unsigned random = 1
+            target->address_space->arch.asid = random
+            // Overwrite used asid, so flush those values from tlb.
+            flush_tlb (random)
+            ++random
+            if random >= 64:
+                random = 1
+        asids[target->address_space->arch.asid] = (unsigned)target
     cp0_set (CP0_ENTRY_HI, target->address_space->arch.asid)
-    // TODO: flush TLB if the asid is already taken.
mips.hhp | 4
@@ -58,7 +58,9 @@ struct Memory_arch:
     unsigned asid
     unsigned **directory
 
-EXTERN unsigned g_asid
+// Pointers to Memory when asid is taken, index of next free, or 0, if free.
+// asid[0] is used as index to first free asid.
+EXTERN unsigned asids[64]
 
 // Functions which can be called from assembly must not be mangled.
 extern "C":
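The asids array replacing g_asid above is both an ownership map and a free list: while ASID i is taken, asids[i] points at the owning Memory; while it is free, asids[i] holds the index of the next free ASID, and asids[0] is the head of that list (see the initialisation loop added in init.ccp and the allocation and release paths in mips.ccp). A stand-alone plain-C++ sketch of the scheme, with hypothetical helper names:

#include <cstdint>

struct Memory;

// asids[i], i in 1..63: pointer to the owning Memory while ASID i is taken,
// otherwise the index of the next free ASID.  asids[0] is the free-list head.
// The kernel stores these in plain unsigned on 32-bit MIPS; uintptr_t keeps
// the sketch portable.
static uintptr_t asids[64];

static void asid_init ()
{
    for (unsigned i = 0; i < 63; ++i)
        asids[i] = i + 1;       // free list: 0 -> 1 -> 2 -> ... -> 63
    asids[63] = 0;              // end of the list
}

// Hypothetical helper: take a free ASID for mem, or return 0 when none is
// left (the kernel instead recycles an ASID and flushes it from the TLB).
static unsigned asid_alloc (Memory *mem)
{
    unsigned a = (unsigned)asids[0];
    if (!a)
        return 0;
    asids[0] = asids[a];            // pop the head of the free list
    asids[a] = (uintptr_t)mem;      // record the owner
    return a;
}

// Hypothetical helper: release an ASID, as Memory_arch_free now does.
static void asid_free (unsigned a)
{
    asids[a] = asids[0];
    asids[0] = a;
}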