| version 1.12, 2004/02/20 16:09:04 | version 1.21, 2004/03/24 14:03:52 |
|---|---|
| Line 1 | Line 1 |
| /* $Id$ */ | /* $Id$ */ |
| /* | /* |
| * Copyright (c) 2003 NONAKA Kimihiro | * Copyright (c) 2003-2004 NONAKA Kimihiro |
| * All rights reserved. | * All rights reserved. |
| * | * |
| * Redistribution and use in source and binary forms, with or without | * Redistribution and use in source and binary forms, with or without |
| Line 184 static const UINT8 page_access_bit[32] = | Line 184 static const UINT8 page_access_bit[32] = |
| * +- CR3 (physical address) | * +- CR3 (physical address) |
| */ | */ |
| static UINT32 paging(UINT32 laddr, int crw, int user_mode); | static UINT32 paging(const UINT32 laddr, const int crw, const int user_mode); |
| #if defined(IA32_SUPPORT_TLB) | #if defined(IA32_SUPPORT_TLB) |
| static BOOL tlb_lookup(UINT32 vaddr, int crw, UINT32 *paddr); | static BOOL tlb_lookup(const UINT32 vaddr, const int crw, UINT32 *paddr); |
| static void tlb_update(UINT32 paddr, UINT entry, int crw); | static void tlb_update(const UINT32 laddr, const UINT entry, const int crw); |
| #endif | #endif |
| #if defined(IA32_PAGING_EACHSIZE) | |
| UINT8 MEMCALL | |
| cpu_memory_access_la_RMW_b(UINT32 laddr, UINT32 (*func)(UINT32, void *), void *arg) | |
| { | |
| const int crw = (CPU_PAGE_WRITE|CPU_PAGE_DATA); | |
| const int user_mode = CPU_STAT_USER_MODE; | |
| UINT32 result, value; | |
| UINT32 paddr; | |
| paddr = paging(laddr, crw, user_mode); | |
| value = cpu_memoryread(paddr); | |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr, (UINT8)result); | |
| return value; | |
| } | |
| UINT16 MEMCALL | |
| cpu_memory_access_la_RMW_w(UINT32 laddr, UINT32 (*func)(UINT32, void *), void *arg) | |
| { | |
| const int crw = (CPU_PAGE_WRITE|CPU_PAGE_DATA); | |
| const int user_mode = CPU_STAT_USER_MODE; | |
| UINT32 result, value; | |
| UINT32 paddr[2]; | |
| paddr[0] = paging(laddr, crw, user_mode); | |
| if ((laddr + 1) & 0x00000fff) { | |
| value = cpu_memoryread_w(paddr[0]); | |
| result = (*func)(value, arg); | |
| cpu_memorywrite_w(paddr[0], (UINT16)result); | |
| } else { | |
| paddr[1] = paging(laddr + 1, crw, user_mode); | |
| value = cpu_memoryread_b(paddr[0]); | |
| value += (UINT16)cpu_memoryread_b(paddr[1]) << 8; | |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr[0], (UINT8)result); | |
| cpu_memorywrite(paddr[1], (UINT8)(result >> 8)); | |
| } | |
| return value; | |
| } | |
| UINT32 MEMCALL | UINT32 MEMCALL |
| cpu_linear_memory_read(UINT32 laddr, UINT length, int crw, int user_mode) | cpu_memory_access_la_RMW_d(UINT32 laddr, UINT32 (*func)(UINT32, void *), void *arg) |
| { | { |
| UINT32 value = 0; | const int crw = (CPU_PAGE_WRITE|CPU_PAGE_DATA); |
| UINT32 paddr; | const int user_mode = CPU_STAT_USER_MODE; |
| UINT remain; /* page remain */ | UINT32 result, value; |
| UINT r; | UINT32 paddr[2]; |
| int shift = 0; | UINT remain; |
| /* XXX: 4MB pages... */ | paddr[0] = paging(laddr, crw, user_mode); |
| remain = 0x1000 - (laddr & 0x00000fff); | remain = 0x1000 - (laddr & 0x00000fff); |
| for (;;) { | if (remain >= 4) { |
| paddr = paging(laddr, crw, user_mode); | value = cpu_memoryread_d(paddr[0]); |
| result = (*func)(value, arg); | |
| cpu_memorywrite_d(paddr[0], result); | |
| } else { | |
| paddr[1] = paging(laddr + remain, crw, user_mode); | |
| switch (remain) { | |
| case 3: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[0] + 1) << 8; | |
| value += (UINT32)cpu_memoryread(paddr[1]) << 24; | |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr[0], (UINT8)result); | |
| cpu_memorywrite_w(paddr[0] + 1, (UINT16)(result >> 8)); | |
| cpu_memorywrite(paddr[1], (BYTE)(result >> 24)); | |
| break; | |
| r = (remain > length) ? length : remain; | case 2: |
| switch (r) { | value = cpu_memoryread_w(paddr[0]); |
| case 4: | value += (UINT32)cpu_memoryread_w(paddr[1]) << 16; |
| value = cpu_memoryread_d(paddr); | result = (*func)(value, arg); |
| cpu_memorywrite_w(paddr[0], (UINT16)result); | |
| cpu_memorywrite_w(paddr[1], (UINT16)(result >> 16)); | |
| break; | |
| case 1: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[1]) << 8; | |
| value += (UINT32)cpu_memoryread(paddr[1] + 2) << 24; | |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr[0], (UINT8)result); | |
| cpu_memorywrite_w(paddr[1], (UINT16)(result >> 8)); | |
| cpu_memorywrite(paddr[1] + 2, (UINT8)(result >> 24)); | |
| break; | |
| default: | |
| ia32_panic("cpu_memory_access_la_RMW_d(): out of range (remain = %d)\n", remain); | |
| value = 0; /* compiler happy */ | |
| break; | break; |
| } | |
| } | |
| return value; | |
| } | |
| UINT8 MEMCALL | |
| cpu_linear_memory_read_b(UINT32 laddr, const int crw, const int user_mode) | |
| { | |
| UINT32 addr; | |
| addr = paging(laddr, crw, user_mode); | |
| return cpu_memoryread(addr); | |
| } | |
| UINT16 MEMCALL | |
| cpu_linear_memory_read_w(UINT32 laddr, const int crw, const int user_mode) | |
| { | |
| UINT32 addr, addr2; | |
| UINT16 value; | |
| addr = paging(laddr, crw, user_mode); | |
| if ((laddr + 1) & 0x00000fff) { | |
| return cpu_memoryread_w(addr); | |
| } else { | |
| addr2 = paging(laddr + 1, crw, user_mode); | |
| value = cpu_memoryread_b(addr); | |
| value += (UINT16)cpu_memoryread_b(addr2) << 8; | |
| return value; | |
| } | |
| } | |
| UINT32 MEMCALL | |
| cpu_linear_memory_read_d(UINT32 laddr, const int crw, const int user_mode) | |
| { | |
| UINT32 addr, addr2; | |
| UINT32 value; | |
| UINT remain; | |
| addr = paging(laddr, crw, user_mode); | |
| remain = 0x1000 - (laddr & 0x00000fff); | |
| if (remain >= 4) { | |
| return cpu_memoryread_d(addr); | |
| } else { | |
| addr2 = paging(laddr + remain, crw, user_mode); | |
| switch (remain) { | |
| case 3: | case 3: |
| value += (UINT32)cpu_memoryread(paddr) << shift; | value = cpu_memoryread(addr); |
| shift += 8; | value += (UINT32)cpu_memoryread_w(addr + 1) << 8; |
| paddr++; | value += (UINT32)cpu_memoryread(addr2) << 24; |
| /*FALLTHROUGH*/ | break; |
| case 2: | case 2: |
| value += (UINT32)cpu_memoryread_w(paddr) << shift; | value = cpu_memoryread_w(addr); |
| shift += 16; | value += (UINT32)cpu_memoryread_w(addr2) << 16; |
| break; | break; |
| case 1: | case 1: |
| value += (UINT32)cpu_memoryread(paddr) << shift; | value = cpu_memoryread(addr); |
| shift += 8; | value += (UINT32)cpu_memoryread(addr2) << 8; |
| value += (UINT32)cpu_memoryread_w(addr2 + 1) << 16; | |
| break; | break; |
| default: | default: |
| ia32_panic("cpu_linear_memory_read(): out of range (r = %d)\n", r); | ia32_panic("cpu_linear_memory_read_d(): out of range (remain = %d)\n", remain); |
| value = 0; /* compiler happy */ | |
| break; | break; |
| } | } |
| return value; | |
| } | |
| } | |
| length -= r; | void MEMCALL |
| if (length == 0) | cpu_linear_memory_write_b(UINT32 laddr, UINT8 value, const int user_mode) |
| { | |
| const int crw = (CPU_PAGE_WRITE|CPU_PAGE_DATA); | |
| UINT32 addr; | |
| addr = paging(laddr, crw, user_mode); | |
| cpu_memorywrite(addr, value); | |
| } | |
| void MEMCALL | |
| cpu_linear_memory_write_w(UINT32 laddr, UINT16 value, const int user_mode) | |
| { | |
| const int crw = (CPU_PAGE_WRITE|CPU_PAGE_DATA); | |
| UINT32 addr, addr2; | |
| addr = paging(laddr, crw, user_mode); | |
| if ((laddr + 1) & 0x00000fff) { | |
| cpu_memorywrite_w(addr, value); | |
| } else { | |
| addr2 = paging(laddr + 1, crw, user_mode); | |
| cpu_memorywrite(addr, (UINT8)value); | |
| cpu_memorywrite(addr2, (UINT8)(value >> 8)); | |
| } | |
| } | |
| void MEMCALL | |
| cpu_linear_memory_write_d(UINT32 laddr, UINT32 value, const int user_mode) | |
| { | |
| const int crw = (CPU_PAGE_WRITE|CPU_PAGE_DATA); | |
| UINT32 addr, addr2; | |
| UINT remain; | |
| addr = paging(laddr, crw, user_mode); | |
| remain = 0x1000 - (laddr & 0x00000fff); | |
| if (remain >= 4) { | |
| cpu_memorywrite_d(addr, value); | |
| } else { | |
| addr2 = paging(laddr + remain, crw, user_mode); | |
| switch (remain) { | |
| case 3: | |
| cpu_memorywrite(addr, (UINT8)value); | |
| cpu_memorywrite_w(addr + 1, (UINT16)(value >> 8)); | |
| cpu_memorywrite(addr2, (UINT8)(value >> 24)); | |
| break; | break; |
| laddr += r; | case 2: |
| remain -= r; | cpu_memorywrite_w(addr, (UINT16)value); |
| if (remain <= 0) { | cpu_memorywrite_w(addr2, (UINT16)(value >> 16)); |
| /* next page */ | break; |
| remain += 0x1000; | |
| case 1: | |
| cpu_memorywrite(addr, (UINT8)value); | |
| cpu_memorywrite(addr2, (UINT8)(value >> 8)); | |
| cpu_memorywrite_w(addr2 + 1, (UINT16)(value >> 16)); | |
| break; | |
| } | } |
| } | } |
| } | |
| #else /* !IA32_PAGING_EACHSIZE */ | |
| UINT32 MEMCALL | |
| cpu_memory_access_la_RMW(UINT32 laddr, UINT length, UINT32 (*func)(UINT32, void *), void *arg) | |
| { | |
| const int crw = (CPU_PAGE_WRITE|CPU_PAGE_DATA); | |
| const int user_mode = CPU_STAT_USER_MODE; | |
| UINT32 result, value; | |
| UINT32 paddr[2]; | |
| UINT remain; | |
| paddr[0] = paging(laddr, crw, user_mode); | |
| remain = 0x1000 - (laddr & 0x00000fff); | |
| if (remain >= length) { | |
| /* fast mode */ | |
| switch (length) { | |
| case 4: | |
| value = cpu_memoryread_d(paddr[0]); | |
| result = (*func)(value, arg); | |
| cpu_memorywrite_d(paddr[0], result); | |
| break; | |
| case 2: | |
| value = cpu_memoryread_w(paddr[0]); | |
| result = (*func)(value, arg); | |
| cpu_memorywrite_w(paddr[0], (UINT16)result); | |
| break; | |
| case 1: | |
| value = cpu_memoryread(paddr[0]); | |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr[0], (UINT8)result); | |
| break; | |
| default: | |
| ia32_panic("cpu_memory_access_la_RMW(): invalid length (length = %d)\n", length); | |
| value = 0; /* compiler happy */ | |
| break; | |
| } | |
| return value; | |
| } | |
| /* slow mode */ | |
| paddr[1] = paging(laddr + remain, crw, user_mode); | |
| switch (remain) { | |
| case 3: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[0] + 1) << 8; | |
| value += (UINT32)cpu_memoryread(paddr[1]) << 24; | |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr[0], (UINT8)result); | |
| cpu_memorywrite_w(paddr[0] + 1, (UINT16)(result >> 8)); | |
| cpu_memorywrite(paddr[1], (BYTE)(result >> 24)); | |
| break; | |
| case 2: | |
| value = cpu_memoryread_w(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[1]) << 16; | |
| result = (*func)(value, arg); | |
| cpu_memorywrite_w(paddr[0], (UINT16)result); | |
| cpu_memorywrite_w(paddr[1], (UINT16)(result >> 16)); | |
| break; | |
| case 1: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT32)cpu_memoryread(paddr[1]) << 8; | |
| if (length == 4) { | |
| value += (UINT32)cpu_memoryread_w(paddr[1] + 1) << 16; | |
| } | |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr[0], (UINT8)result); | |
| cpu_memorywrite(paddr[1], (UINT8)(result >> 8)); | |
| if (length == 4) { | |
| cpu_memorywrite_w(paddr[1] + 1, (UINT16)(result >> 16)); | |
| } | |
| break; | |
| default: | |
| ia32_panic("cpu_memory_access_la_RMW(): out of range (remain = %d)\n", remain); | |
| value = 0; /* compiler happy */ | |
| break; | |
| } | |
| return value; | return value; |
| } | } |
| void MEMCALL | UINT32 MEMCALL |
| cpu_linear_memory_write(UINT32 laddr, UINT32 value, UINT length, int user_mode) | cpu_linear_memory_read(UINT32 laddr, UINT length, const int crw, const int user_mode) |
| { | { |
| UINT32 paddr; | UINT32 value; |
| UINT remain; /* page remain */ | UINT32 paddr[2]; |
| UINT r; | UINT remain; |
| int crw = (CPU_PAGE_WRITE|CPU_PAGE_DATA); | |
| /* XXX: 4MB pages... */ | paddr[0] = paging(laddr, crw, user_mode); |
| remain = 0x1000 - (laddr & 0x00000fff); | remain = 0x1000 - (laddr & 0x00000fff); |
| for (;;) { | if (remain >= length) { |
| paddr = paging(laddr, crw, user_mode); | /* fast mode */ |
| switch (length) { | |
| case 4: | |
| value = cpu_memoryread_d(paddr[0]); | |
| break; | |
| r = (remain > length) ? length : remain; | case 2: |
| switch (r) { | value = cpu_memoryread_w(paddr[0]); |
| break; | |
| case 1: | |
| value = cpu_memoryread(paddr[0]); | |
| break; | |
| default: | |
| ia32_panic("cpu_linear_memory_read(): invalid length (length = %d)\n", length); | |
| value = 0; /* compiler happy */ | |
| break; | |
| } | |
| return value; | |
| } | |
| /* slow mode */ | |
| paddr[1] = paging(laddr + remain, crw, user_mode); | |
| switch (remain) { | |
| case 3: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[0] + 1) << 8; | |
| value += (UINT32)cpu_memoryread(paddr[1]) << 24; | |
| break; | |
| case 2: | |
| value = cpu_memoryread_w(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[1]) << 16; | |
| break; | |
| case 1: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT32)cpu_memoryread(paddr[1]) << 8; | |
| if (length == 4) { | |
| value += (UINT32)cpu_memoryread_w(paddr[1] + 1) << 16; | |
| } | |
| break; | |
| default: | |
| ia32_panic("cpu_linear_memory_read(): out of range (remain = %d)\n", remain); | |
| value = 0; /* compiler happy */ | |
| break; | |
| } | |
| return value; | |
| } | |
| void MEMCALL | |
| cpu_linear_memory_write(UINT32 laddr, UINT32 value, UINT length, const int user_mode) | |
| { | |
| const int crw = (CPU_PAGE_WRITE|CPU_PAGE_DATA); | |
| UINT32 paddr[2]; | |
| UINT remain; | |
| paddr[0] = paging(laddr, crw, user_mode); | |
| remain = 0x1000 - (laddr & 0x00000fff); | |
| if (remain >= length) { | |
| /* fast mode */ | |
| switch (length) { | |
| case 4: | case 4: |
| cpu_memorywrite_d(paddr, value); | cpu_memorywrite_d(paddr[0], value); |
| break; | break; |
| case 3: | |
| cpu_memorywrite(paddr, value & 0xff); | |
| value >>= 8; | |
| paddr++; | |
| /*FALLTHROUGH*/ | |
| case 2: | case 2: |
| cpu_memorywrite_w(paddr, value & 0xffff); | cpu_memorywrite_w(paddr[0], (UINT16)value); |
| value >>= 16; | |
| break; | break; |
| case 1: | case 1: |
| cpu_memorywrite(paddr, value & 0xff); | cpu_memorywrite(paddr[0], (UINT8)value); |
| value >>= 8; | |
| break; | break; |
| default: | default: |
| ia32_panic("cpu_linear_memory_write(): out of range (r = %d)\n", r); | ia32_panic("cpu_linear_memory_write(): invalid length (length = %d)\n", length); |
| break; | break; |
| } | } |
| return; | |
| } | |
| /* slow mode */ | |
| paddr[1] = paging(laddr + remain, crw, user_mode); | |
| switch (remain) { | |
| case 3: | |
| cpu_memorywrite(paddr[0], (UINT8)value); | |
| cpu_memorywrite_w(paddr[0] + 1, (UINT16)(value >> 8)); | |
| cpu_memorywrite(paddr[1], (UINT8)(value >> 24)); | |
| break; | |
| case 2: | |
| cpu_memorywrite_w(paddr[0], (UINT16)value); | |
| cpu_memorywrite_w(paddr[1], (UINT16)(value >> 16)); | |
| break; | |
| case 1: | |
| cpu_memorywrite(paddr[0], (UINT8)value); | |
| cpu_memorywrite(paddr[1], (UINT8)(value >> 8)); | |
| if (length == 4) { | |
| cpu_memorywrite_w(paddr[1] + 1, (UINT16)(value >> 16)); | |
| } | |
| break; | |
| default: | |
| ia32_panic("cpu_linear_memory_write(): out of range (remain = %d)\n", remain); | |
| break; | |
| } | |
| } | |
| #endif /* IA32_PAGING_EACHSIZE */ | |
| void MEMCALL | |
| cpu_memory_access_la_region(UINT32 laddr, UINT length, const int crw, const int user_mode, BYTE *data) | |
| { | |
| UINT32 paddr; | |
| UINT remain; /* page remain */ | |
| UINT r; | |
| if (length == 0) | |
| return; | |
| remain = 0x1000 - (laddr & 0x00000fff); | |
| for (;;) { | |
| if (!CPU_STAT_PAGING) { | |
| paddr = laddr; | |
| } else { | |
| paddr = paging(laddr, crw, user_mode); | |
| } | |
| r = (remain > length) ? length : remain; | |
| if (!(crw & CPU_PAGE_WRITE)) { | |
| cpu_memoryread_region(paddr, data, r); | |
| } else { | |
| cpu_memorywrite_region(paddr, data, r); | |
| } | |
| length -= r; | length -= r; |
| if (length == 0) | if (length == 0) |
| break; | break; |
| data += r; | |
| laddr += r; | laddr += r; |
| remain -= r; | remain -= r; |
| if (remain <= 0) { | if (remain <= 0) { |
| Line 299 cpu_linear_memory_write(UINT32 laddr, UI | Line 659 cpu_linear_memory_write(UINT32 laddr, UI |
| } | } |
| void MEMCALL | void MEMCALL |
| paging_check(UINT32 laddr, UINT length, int crw, int user_mode) | paging_check(UINT32 laddr, UINT length, const int crw, const int user_mode) |
| { | { |
| UINT32 paddr; | UINT32 paddr; |
| UINT remain; /* page remain */ | UINT remain; /* page remain */ |
| UINT r; | UINT r; |
| /* XXX: 4MB pages... */ | |
| remain = 0x1000 - (laddr & 0x00000fff); | remain = 0x1000 - (laddr & 0x00000fff); |
| for (;;) { | for (;;) { |
| paddr = paging(laddr, crw, user_mode); | paddr = paging(laddr, crw, user_mode); |
| Line 326 paging_check(UINT32 laddr, UINT length, | Line 685 paging_check(UINT32 laddr, UINT length, |
| } | } |
| static UINT32 | static UINT32 |
| paging(UINT32 laddr, int crw, int user_mode) | paging(const UINT32 laddr, const int crw, const int user_mode) |
| { | { |
| UINT32 paddr; /* physical address */ | UINT32 paddr; /* physical address */ |
| UINT32 pde_addr; /* page directory entry address */ | UINT32 pde_addr; /* page directory entry address */ |
| Line 355 paging(UINT32 laddr, int crw, int user_m | Line 714 paging(UINT32 laddr, int crw, int user_m |
| cpu_memorywrite_d(pde_addr, pde); | cpu_memorywrite_d(pde_addr, pde); |
| } | } |
| #if CPU_FAMILY >= 5 | pte_addr = (pde & CPU_PDE_BASEADDR_MASK) + ((laddr >> 10) & 0xffc); |
| /* no support PAE */ | pte = cpu_memoryread_d(pte_addr); |
| __ASSERT(!(CPU_CR4 & CPU_CR4_PAE)); | if (!(pte & CPU_PTE_PRESENT)) { |
| VERBOSE(("paging: page is not present")); | |
| if ((CPU_CR4 & CPU_CR4_PSE) && (pde & CPU_PDE_PAGE_SIZE)) { | VERBOSE(("paging: laddr = 0x%08x, pde_addr = 0x%08x, pde = 0x%08x", laddr, pde_addr, pde)); |
| /* 4MB page size */ | VERBOSE(("paging: pte_addr = 0x%08x, pte = 0x%08x", pte_addr, pte)); |
| err = 0; | |
| /* fake PTE bit */ | goto pf_exception; |
| pte = pde | CPU_PTE_DIRTY; | } |
| pte_addr = 0; /* compiler happy */ | if (!(pte & CPU_PTE_ACCESS)) { |
| pte |= CPU_PTE_ACCESS; | |
| /* make physical address */ | cpu_memorywrite_d(pte_addr, pte); |
| paddr = (pde & CPU_PDE_4M_BASEADDR_MASK) + (laddr & 0x003fffff); | |
| } else | |
| #endif /* CPU_FAMILY >= 5 */ | |
| { | |
| /* 4KB page size */ | |
| pte_addr = (pde & CPU_PDE_BASEADDR_MASK) + ((laddr >> 10) & 0xffc); | |
| pte = cpu_memoryread_d(pte_addr); | |
| if (!(pte & CPU_PTE_PRESENT)) { | |
| VERBOSE(("paging: page is not present")); | |
| VERBOSE(("paging: laddr = 0x%08x, pde_addr = 0x%08x, pde = 0x%08x", laddr, pde_addr, pde)); | |
| VERBOSE(("paging: pte_addr = 0x%08x, pte = 0x%08x", pte_addr, pte)); | |
| err = 0; | |
| goto pf_exception; | |
| } | |
| if (!(pte & CPU_PTE_ACCESS)) { | |
| pte |= CPU_PTE_ACCESS; | |
| cpu_memorywrite_d(pte_addr, pte); | |
| } | |
| /* make physical address */ | |
| paddr = (pte & CPU_PTE_BASEADDR_MASK) + (laddr & 0x00000fff); | |
| } | } |
| /* make physical address */ | |
| paddr = (pte & CPU_PTE_BASEADDR_MASK) + (laddr & 0x00000fff); | |
| bit = crw & CPU_PAGE_WRITE; | bit = crw & CPU_PAGE_WRITE; |
| bit |= (pde & pte & (CPU_PTE_WRITABLE|CPU_PTE_USER_MODE)); | bit |= (pde & pte & (CPU_PTE_WRITABLE|CPU_PTE_USER_MODE)); |
| bit |= (user_mode << 3); | bit |= (user_mode << 3); |
| Line 415 paging(UINT32 laddr, int crw, int user_m | Line 756 paging(UINT32 laddr, int crw, int user_m |
| } | } |
| #if defined(IA32_SUPPORT_TLB) | #if defined(IA32_SUPPORT_TLB) |
| tlb_update(paddr, pte, crw); | tlb_update(laddr, pte, crw); |
| #endif /* IA32_SUPPORT_TLB */ | #endif /* IA32_SUPPORT_TLB */ |
| return paddr; | return paddr; |
| Line 431 pf_exception: | Line 772 pf_exception: |
| /* | /* |
| * TLB | * TLB |
| */ | */ |
| #if defined(IA32_PROFILE_TLB) | |
| /* profiling */ | |
| typedef struct { | typedef struct { |
| UINT8 valid; /* TLB entry is valid */ | UINT64 tlb_hits; |
| UINT8 global; /* this TLB entry is global */ | UINT64 tlb_misses; |
| UINT8 score; | UINT64 tlb_lookups; |
| UINT8 pad; | UINT64 tlb_updates; |
| UINT64 tlb_flushes; | |
| UINT64 tlb_global_flushes; | |
| UINT64 tlb_entry_flushes; | |
| } TLB_PROFILE_T; | |
| UINT32 tag; | static TLB_PROFILE_T tlb_profile; |
| UINT32 mask; /* 4K or 2M or 4M */ | |
| #define PROFILE_INC(v) tlb_profile.v++ | |
| #else /* !IA32_PROFILE_TLB */ | |
| #define PROFILE_INC(v) | |
| #endif /* IA32_PROFILE_TLB */ | |
| UINT32 paddr; /* physical addr */ | |
| } TLB_ENTRY_T; | |
| typedef struct { | typedef struct { |
| UINT8 kind; | UINT32 tag; /* linear address */ |
| #define TLB_KIND_INSTRUCTION (1 << 1) | #define TLB_ENTRY_VALID (1 << 0) |
| #define TLB_KIND_DATA (1 << 2) | #define TLB_ENTRY_GLOBAL CPU_PTE_GLOBAL_PAGE |
| #define TLB_KIND_COMBINE (TLB_KIND_INSTRUCTION|TLB_KIND_DATA) | |
| #define TLB_KIND_SMALL (1 << 3) | |
| #define TLB_KIND_LARGE (1 << 4) | |
| #define TLB_KIND_BOTH (TLB_KIND_SMALL|TLB_KIND_LARGE) | |
| UINT8 way; /* n-way associative */ | |
| UINT8 idx; /* number of TLB index */ | |
| UINT8 bpad; | |
| UINT16 num; /* number of TLB entry */ | UINT32 paddr; /* physical address */ |
| UINT16 wpad; | } TLB_ENTRY_T; |
| TLB_ENTRY_T *entry; /* entry[assoc][idx] or entry[assoc] if idx == 1*/ | #define TLB_GET_PADDR(ep, addr) ((ep)->paddr + ((addr) & ~CPU_PTE_BASEADDR_MASK)) |
| } TLB_T; | #define TLB_SET_PADDR(ep, addr) ((ep)->paddr = (addr) & CPU_PTE_BASEADDR_MASK) |
| static int ntlb; | #define TLB_TAG_SHIFT 17 |
| static TLB_T tlb[4]; /* i TLB, i (lp) TLB, d TLB, d (lp) TLB */ | #define TLB_TAG_MASK ~((1 << TLB_TAG_SHIFT) - 1) |
| #define TLB_GET_TAG_ADDR(ep) ((ep)->tag & TLB_TAG_MASK) | |
| #define TLB_SET_TAG_ADDR(ep, addr) \ | |
| ((ep)->tag = ((addr) & TLB_TAG_MASK) + ((ep)->tag & ~TLB_TAG_MASK)) | |
| #define TLB_IS_VALID(ep) ((ep)->tag & TLB_ENTRY_VALID) | |
| #define TLB_SET_VALID(ep) ((ep)->tag |= TLB_ENTRY_VALID) | |
| #define TLB_CLEAR_VALID(ep) ((ep)->tag &= ~TLB_ENTRY_VALID) | |
| #if defined(IA32_PROFILE_TLB) | #if CPU_FAMILY == 4 |
| /* profiling */ | #define TLB_IS_GLOBAL(ep) FALSE |
| static UINT64 tlb_hits; | #define TLB_SET_GLOBAL(ep) (void)(ep) |
| static UINT64 tlb_misses; | #define TLB_CLEAR_GLOBAL(ep) (void)(ep) |
| static UINT64 tlb_lookups; | #else |
| static UINT64 tlb_updates; | #define TLB_IS_GLOBAL(ep) ((ep)->tag & TLB_ENTRY_GLOBAL) |
| static UINT64 tlb_flushes; | #define TLB_SET_GLOBAL(ep) ((ep)->tag |= TLB_ENTRY_GLOBAL) |
| static UINT64 tlb_global_flushes; | #define TLB_CLEAR_GLOBAL(ep) ((ep)->tag &= ~TLB_ENTRY_GLOBAL) |
| static UINT64 tlb_entry_flushes; | #endif |
| #if CPU_FAMILY == 4 | |
| #define NTLB 1 | |
| #define NENTRY (1 << 3) | |
| #define NWAY (1 << 2) | |
| #define TLB_ENTRY_SHIFT 12 | |
| #define TLB_ENTRY_MASK (NENTRY - 1) | |
| #define TLB_WAY_SHIFT 15 | |
| #define TLB_WAY_MASK (NWAY - 1) | |
| #endif | |
| typedef struct { | |
| TLB_ENTRY_T entry[NENTRY][NWAY]; | |
| } TLB_T; | |
| static TLB_T tlb; | |
| #define PROFILE_INC(v) (v)++; | |
| #else /* !IA32_PROFILE_TLB */ | |
| #define PROFILE_INC(v) | |
| #endif /* IA32_PROFILE_TLB */ | |
| void | void |
| tlb_init(void) | tlb_init(void) |
| { | { |
| int i; | |
| for (i = 0; i < NELEMENTS(tlb); i++) { | |
| if (tlb[i].entry) { | |
| free(tlb[i].entry); | |
| } | |
| } | |
| memset(tlb, 0, sizeof(tlb)); | |
| memset(&tlb, 0, sizeof(tlb)); | |
| #if defined(IA32_PROFILE_TLB) | #if defined(IA32_PROFILE_TLB) |
| tlb_hits = 0; | memset(&tlb_profile, 0, sizeof(tlb_profile)); |
| tlb_misses = 0; | |
| tlb_lookups = 0; | |
| tlb_updates = 0; | |
| tlb_flushes = 0; | |
| tlb_global_flushes = 0; | |
| tlb_entry_flushes = 0; | |
| #endif /* IA32_PROFILE_TLB */ | #endif /* IA32_PROFILE_TLB */ |
| #if CPU_FAMILY == 4 | |
| /* use the i486 layout for now... */ | |
| /* combine (I/D) TLB: 4KB Pages, 4-way set associative 32 entries */ | |
| ntlb = 1; | |
| tlb[0].kind = TLB_KIND_COMBINE | TLB_KIND_SMALL; | |
| tlb[0].num = 32; | |
| tlb[0].way = 4; | |
| #endif | |
| for (i = 0; i < ntlb; i++) { | |
| tlb[i].idx = tlb[i].num / tlb[i].way; | |
| tlb[i].entry = (TLB_ENTRY_T*)calloc(sizeof(TLB_ENTRY_T), tlb[i].num); | |
| if (tlb[i].entry == 0) { | |
| ia32_panic("tlb_init(): can't alloc TLB entry\n"); | |
| } | |
| } | |
| } | } |
| void | void |
| Line 533 tlb_flush(BOOL allflush) | Line 865 tlb_flush(BOOL allflush) |
| PROFILE_INC(tlb_flushes); | PROFILE_INC(tlb_flushes); |
| } | } |
| for (i = 0; i < ntlb; i++) { | for (i = 0; i < NENTRY ; i++) { |
| ep = tlb[i].entry; | for (j = 0; j < NWAY; j++) { |
| for (j = 0; j < tlb[i].num; j++, ep++) { | ep = &tlb.entry[i][j]; |
| if (ep->valid && (allflush || !ep->global)) { | if (TLB_IS_VALID(ep) && (!TLB_IS_GLOBAL(ep) || allflush)) { |
| ep->valid = 0; | TLB_CLEAR_VALID(ep); |
| PROFILE_INC(tlb_entry_flushes); | PROFILE_INC(tlb_entry_flushes); |
| } | } |
| } | } |
| Line 545 tlb_flush(BOOL allflush) | Line 877 tlb_flush(BOOL allflush) |
| } | } |
| void | void |
| tlb_flush_page(UINT32 vaddr) | tlb_flush_page(UINT32 laddr) |
| { | { |
| TLB_ENTRY_T *ep; | TLB_ENTRY_T *ep; |
| int idx; | int idx; |
| int i; | int way; |
| for (i = 0; i < ntlb; i++) { | PROFILE_INC(tlb_flushes); |
| if (tlb[i].idx == 1) { | |
| /* fully set associative */ | |
| idx = 0; | |
| } else { | |
| if (tlb[i].kind & TLB_KIND_SMALL) { | |
| idx = (vaddr >> 12) & (tlb[i].idx - 1); | |
| } else { | |
| idx = (vaddr >> 22) & (tlb[i].idx - 1); | |
| } | |
| } | |
| /* search */ | idx = (laddr >> TLB_ENTRY_SHIFT) & (NENTRY - 1); |
| ep = &tlb[i].entry[idx * tlb[i].way]; | way = (laddr >> TLB_WAY_SHIFT) & (NWAY - 1); |
| for (i = 0; i < tlb[i].way; i++) { | ep = &tlb.entry[idx][way]; |
| if (ep->valid) { | |
| if ((vaddr & ep->mask) == ep->tag) { | if (TLB_IS_VALID(ep)) { |
| ep->valid = 0; | if ((laddr & TLB_TAG_MASK) == TLB_GET_TAG_ADDR(ep)) { |
| PROFILE_INC(tlb_entry_flushes); | TLB_CLEAR_VALID(ep); |
| break; | return; |
| } | |
| } | |
| } | } |
| } | } |
| } | } |
| static BOOL | static BOOL |
| tlb_lookup(UINT32 laddr, int crw, UINT32 *paddr) | tlb_lookup(const UINT32 laddr, const int crw, UINT32 *paddr) |
| { | { |
| TLB_ENTRY_T *ep; | TLB_ENTRY_T *ep; |
| int idx; | int idx; |
| int i; | int way; |
| PROFILE_INC(tlb_lookups); | PROFILE_INC(tlb_lookups); |
| crw &= CPU_PAGE_CODE | CPU_PAGE_DATA; | idx = (laddr >> TLB_ENTRY_SHIFT) & (NENTRY - 1); |
| for (i = 0; i < ntlb; i++) { | way = (laddr >> TLB_WAY_SHIFT) & (NWAY - 1); |
| if (tlb[i].kind & crw) { | ep = &tlb.entry[idx][way]; |
| if (tlb[i].idx == 1) { | |
| /* fully set associative */ | |
| idx = 0; | if (TLB_IS_VALID(ep)) { |
| } else { | if ((laddr & TLB_TAG_MASK) == TLB_GET_TAG_ADDR(ep)) { |
| if (tlb[i].kind & TLB_KIND_SMALL) { | *paddr = TLB_GET_PADDR(ep, laddr); |
| idx = (laddr >> 12) & (tlb[i].idx - 1); | PROFILE_INC(tlb_hits); |
| } else { | return TRUE; |
| idx = (laddr >> 22) & (tlb[i].idx - 1); | |
| } | |
| } | |
| /* search */ | |
| ep = &tlb[i].entry[idx * tlb[i].way]; | |
| for (i = 0; i < tlb[i].way; i++) { | |
| if (ep->valid) { | |
| if ((laddr & ep->mask) == ep->tag) { | |
| if (ep->score != (UINT8)~0) | |
| ep->score++; | |
| *paddr = ep->paddr; | |
| PROFILE_INC(tlb_hits); | |
| return TRUE; | |
| } | |
| } | |
| } | |
| } | } |
| } | } |
| (void)crw; | |
| PROFILE_INC(tlb_misses); | PROFILE_INC(tlb_misses); |
| return FALSE; | return FALSE; |
| } | } |
| static void | static void |
| tlb_update(UINT32 paddr, UINT entry, int crw) | tlb_update(const UINT32 laddr, const UINT entry, const int crw) |
| { | { |
| TLB_ENTRY_T *ep; | TLB_ENTRY_T *ep; |
| int idx; | int idx; |
| int i, j; | int way; |
| int min_way; | |
| UINT16 min_score = ~0; | |
| PROFILE_INC(tlb_updates); | PROFILE_INC(tlb_updates); |
| crw &= CPU_PAGE_CODE | CPU_PAGE_DATA; | idx = (laddr >> TLB_ENTRY_SHIFT) & (NENTRY - 1); |
| for (i = 0; i < ntlb; i++) { | way = (laddr >> TLB_WAY_SHIFT) & (NWAY - 1); |
| if (tlb[i].kind & crw) { | ep = &tlb.entry[idx][way]; |
| if (tlb[i].idx == 1) { | |
| /* fully set associative */ | |
| idx = 0; | |
| } else { | |
| /* n-way set associative */ | |
| if (!(entry & CPU_PDE_PAGE_SIZE)) { | |
| if (!(tlb[i].kind & TLB_KIND_SMALL)) | |
| continue; | |
| idx = (entry >> 12) & (tlb[i].idx - 1); | |
| } else { | |
| if (!(tlb[i].kind & TLB_KIND_LARGE)) | |
| continue; | |
| idx = (entry >> 22) & (tlb[i].idx - 1); | |
| } | |
| } | |
| /* search */ | TLB_SET_VALID(ep); |
| ep = &tlb[i].entry[idx * tlb[i].way]; | #if CPU_FAMILY >= 5 |
| for (min_way = 0, j = 0; j < tlb[i].way; j++, ep++) { | if (entry & CPU_PTE_GLOBAL_PAGE) { |
| if (ep->valid) { | TLB_SET_GLOBAL(ep); |
| if (min_score >= ep->score) { | |
| min_way = j; | |
| min_score = ep->score; | |
| } | |
| } else { | |
| min_way = j; | |
| min_score = 0; | |
| break; | |
| } | |
| } | |
| /* replace */ | |
| ep = &tlb[i].entry[idx * tlb[i].way + min_way]; | |
| ep->valid = 1; | |
| ep->global = (entry & CPU_PTE_GLOBAL_PAGE) ? 1 : 0; | |
| ep->score = 0; | |
| ep->mask = (entry & CPU_PDE_PAGE_SIZE) ? CPU_PDE_4M_BASEADDR_MASK : CPU_PTE_BASEADDR_MASK; | |
| ep->tag = entry & ep->mask; | |
| ep->paddr = paddr; | |
| break; | |
| } | |
| } | } |
| __ASSERT(i != ntlb); | #endif |
| TLB_SET_TAG_ADDR(ep, laddr); | |
| TLB_SET_PADDR(ep, entry); | |
| (void)crw; | |
| } | } |
| #endif /* IA32_SUPPORT_TLB */ | #endif /* IA32_SUPPORT_TLB */ |
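
The new `cpu_linear_memory_read`/`cpu_linear_memory_write` and `cpu_memory_access_la_RMW` paths all share the same split: compute `remain = 0x1000 - (laddr & 0xfff)`, take the single-translation fast path when the access fits in the current 4KB page, and otherwise translate `laddr + remain` once more and stitch the two halves together. The sketch below illustrates that split outside the emulator; it is not the project's code. `translate()`, `phys_read8()` and the byte-wise assembly are stand-ins I introduce here for `paging()` and the width-specific `cpu_memoryread*` calls.

```c
#include <stdint.h>

/* Hypothetical stand-ins for paging() and the physical-memory accessors. */
extern uint32_t translate(uint32_t laddr, int write);
extern uint8_t  phys_read8(uint32_t paddr);

/* Read `length` (1, 2 or 4) bytes at linear address `laddr`, little-endian.
 * If the access fits in the current 4KB page, one translation suffices;
 * otherwise the tail bytes come from the page translated at laddr + remain,
 * exactly as paddr[1] is obtained in the diff above. */
static uint32_t linear_read(uint32_t laddr, unsigned length)
{
    unsigned remain = 0x1000 - (laddr & 0x00000fff);   /* bytes left in page */
    uint32_t head = translate(laddr, 0);
    uint32_t tail = (remain < length) ? translate(laddr + remain, 0) : 0;
    uint32_t value = 0;
    unsigned i;

    for (i = 0; i < length; i++) {
        uint32_t paddr = (i < remain) ? head + i : tail + (i - remain);
        value |= (uint32_t)phys_read8(paddr) << (8 * i);
    }
    return value;
}
```

The real code keeps the explicit `switch (remain)` cases so that an unsplit access costs a single word or doubleword read/write instead of a per-byte loop.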
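
The reworked TLB drops the per-entry `valid`/`global`/`score` bytes and the linear search over ways. Instead, each entry packs its state into one tag word: bits 17 and up (`TLB_TAG_SHIFT`) hold the linear-address tag, the low bits hold the valid flag (and, for `CPU_FAMILY >= 5`, the global flag), and address bits 12..16 select the set and the way directly, so a lookup is a single compare. A minimal sketch of that lookup follows, with locally named constants so nothing here is mistaken for the project's actual identifiers.

```c
#include <stdint.h>

/* Illustrative constants matching the i486-style layout in the new code:
 * 8 sets x 4 ways of 4KB entries. All names are local to this sketch. */
#define SKETCH_NENTRY       8            /* (1 << 3) sets                 */
#define SKETCH_NWAY         4            /* (1 << 2) ways                 */
#define SKETCH_ENTRY_SHIFT  12           /* set index = laddr bits 12..14 */
#define SKETCH_WAY_SHIFT    15           /* way index = laddr bits 15..16 */
#define SKETCH_TAG_SHIFT    17           /* tag       = laddr bits 17..31 */
#define SKETCH_TAG_MASK     (~((1u << SKETCH_TAG_SHIFT) - 1))
#define SKETCH_VALID        (1u << 0)    /* flag kept in the low tag bits */

typedef struct {
    uint32_t tag;    /* linear-address tag plus flag bits */
    uint32_t paddr;  /* page frame base address           */
} sketch_tlb_entry_t;

static sketch_tlb_entry_t sketch_tlb[SKETCH_NENTRY][SKETCH_NWAY];

/* One compare, no search loop: laddr picks the set, the way and the tag. */
static int sketch_lookup(uint32_t laddr, uint32_t *paddr)
{
    sketch_tlb_entry_t *ep =
        &sketch_tlb[(laddr >> SKETCH_ENTRY_SHIFT) & (SKETCH_NENTRY - 1)]
                   [(laddr >> SKETCH_WAY_SHIFT) & (SKETCH_NWAY - 1)];

    if ((ep->tag & SKETCH_VALID) &&
        (ep->tag & SKETCH_TAG_MASK) == (laddr & SKETCH_TAG_MASK)) {
        *paddr = ep->paddr + (laddr & 0x00000fff);
        return 1;   /* hit */
    }
    return 0;       /* miss: caller walks the page tables and refills */
}
```

Because the set and way indices together cover linear-address bits 12..16 and the tag covers bits 17..31, a hit guarantees the whole page number matches; the refill in `tlb_update` simply overwrites the one entry the address maps to, which is why the old score-based replacement logic could be removed.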