| version 1.10, 2004/02/04 13:24:35 | version 1.30, 2008/03/22 04:03:07 |
|---|---|
| Line 1 | Line 1 |
| /* $Id$ */ | /* $Id$ */ |
| /* | /* |
| * Copyright (c) 2003 NONAKA Kimihiro | * Copyright (c) 2003-2004 NONAKA Kimihiro |
| * All rights reserved. | * All rights reserved. |
| * | * |
| * Redistribution and use in source and binary forms, with or without | * Redistribution and use in source and binary forms, with or without |
| Line 12 | Line 12 |
| * 2. Redistributions in binary form must reproduce the above copyright | * 2. Redistributions in binary form must reproduce the above copyright |
| * notice, this list of conditions and the following disclaimer in the | * notice, this list of conditions and the following disclaimer in the |
| * documentation and/or other materials provided with the distribution. | * documentation and/or other materials provided with the distribution. |
| * 3. The name of the author may not be used to endorse or promote products | |
| * derived from this software without specific prior written permission. | |
| * | * |
| * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| Line 114 | Line 112 |
| #if !defined(USE_PAGE_ACCESS_TABLE) | #if !defined(USE_PAGE_ACCESS_TABLE) |
| #define page_access 0xd0ddd0ff | #define page_access 0xd0ddd0ff |
| #else /* USE_PAGE_ACCESS_TABLE */ | #else /* USE_PAGE_ACCESS_TABLE */ |
| static const BYTE page_access_bit[32] = { | static const UINT8 page_access_bit[32] = { |
| 1, /* CR0: n, CPL: s, PTE: s, PTE: r, ope: r */ | 1, /* CR0: n, CPL: s, PTE: s, PTE: r, ope: r */ |
| 1, /* CR0: n, CPL: s, PTE: s, PTE: r, ope: w */ | 1, /* CR0: n, CPL: s, PTE: s, PTE: r, ope: w */ |
| 1, /* CR0: n, CPL: s, PTE: s, PTE: w, ope: r */ | 1, /* CR0: n, CPL: s, PTE: s, PTE: w, ope: r */ |
| Line 184 static const BYTE page_access_bit[32] = | Line 182 static const BYTE page_access_bit[32] = |
| * +- CR3 (physical address) | * +- CR3 (physical address) |
| */ | */ |
| static DWORD paging(DWORD laddr, int crw, int user_mode); | static UINT32 MEMCALL paging(const UINT32 laddr, const int ucrw); |
| #if defined(IA32_SUPPORT_TLB) | #if defined(IA32_SUPPORT_TLB) |
| static BOOL tlb_lookup(DWORD vaddr, int crw, DWORD* paddr); | static void MEMCALL tlb_update(const UINT32 laddr, const UINT entry, const int ucrw); |
| static void tlb_update(DWORD paddr, DWORD entry, int crw); | |
| #endif | #endif |
| #define PAGE_SIZE 0x1000 | |
| #define PAGE_MASK (PAGE_SIZE - 1) | |
| DWORD MEMCALL | UINT8 MEMCALL |
| cpu_linear_memory_read(DWORD laddr, DWORD length, int crw, int user_mode) | cpu_memory_access_la_RMW_b(UINT32 laddr, UINT32 (*func)(UINT32, void *), void *arg) |
| { | { |
| DWORD paddr; | const int ucrw = CPU_PAGE_WRITE_DATA|CPU_STAT_USER_MODE; |
| DWORD remain; /* page remain */ | UINT32 result, value; |
| DWORD r; | UINT32 paddr; |
| DWORD shift = 0; | |
| DWORD value = 0; | paddr = paging(laddr, ucrw); |
| value = cpu_memoryread(paddr); | |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr, (UINT8)result); | |
| /* XXX: 4MB pages... */ | return value; |
| remain = 0x1000 - (laddr & 0x00000fff); | } |
| for (;;) { | |
| paddr = paging(laddr, crw, user_mode); | |
| r = (remain > length) ? length : remain; | UINT16 MEMCALL |
| switch (r) { | cpu_memory_access_la_RMW_w(UINT32 laddr, UINT32 (*func)(UINT32, void *), void *arg) |
| case 4: | { |
| value = cpu_memoryread_d(paddr); | const int ucrw = CPU_PAGE_WRITE_DATA|CPU_STAT_USER_MODE; |
| UINT32 result, value; | |
| UINT32 paddr[2]; | |
| paddr[0] = paging(laddr, ucrw); | |
| if ((laddr + 1) & PAGE_MASK) { | |
| value = cpu_memoryread_w(paddr[0]); | |
| result = (*func)(value, arg); | |
| cpu_memorywrite_w(paddr[0], (UINT16)result); | |
| } else { | |
| paddr[1] = paging(laddr + 1, ucrw); | |
| value = cpu_memoryread_b(paddr[0]); | |
| value += (UINT16)cpu_memoryread_b(paddr[1]) << 8; | |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr[0], (UINT8)result); | |
| cpu_memorywrite(paddr[1], (UINT8)(result >> 8)); | |
| } | |
| return value; | |
| } | |
| UINT32 MEMCALL | |
| cpu_memory_access_la_RMW_d(UINT32 laddr, UINT32 (*func)(UINT32, void *), void *arg) | |
| { | |
| const int ucrw = CPU_PAGE_WRITE_DATA|CPU_STAT_USER_MODE; | |
| UINT32 result, value; | |
| UINT32 paddr[2]; | |
| UINT remain; | |
| paddr[0] = paging(laddr, ucrw); | |
| remain = PAGE_SIZE - (laddr & PAGE_MASK); | |
| if (remain >= 4) { | |
| value = cpu_memoryread_d(paddr[0]); | |
| result = (*func)(value, arg); | |
| cpu_memorywrite_d(paddr[0], result); | |
| } else { | |
| paddr[1] = paging(laddr + remain, ucrw); | |
| switch (remain) { | |
| case 3: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[0] + 1) << 8; | |
| value += (UINT32)cpu_memoryread(paddr[1]) << 24; | |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr[0], (UINT8)result); | |
| cpu_memorywrite_w(paddr[0] + 1, (UINT16)(result >> 8)); | |
| cpu_memorywrite(paddr[1], (UINT8)(result >> 24)); | |
| break; | |
| case 2: | |
| value = cpu_memoryread_w(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[1]) << 16; | |
| result = (*func)(value, arg); | |
| cpu_memorywrite_w(paddr[0], (UINT16)result); | |
| cpu_memorywrite_w(paddr[1], (UINT16)(result >> 16)); | |
| break; | |
| case 1: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[1]) << 8; | |
| value += (UINT32)cpu_memoryread(paddr[1] + 2) << 24; | |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr[0], (UINT8)result); | |
| cpu_memorywrite_w(paddr[1], (UINT16)(result >> 8)); | |
| cpu_memorywrite(paddr[1] + 2, (UINT8)(result >> 24)); | |
| break; | break; |
| default: | |
| ia32_panic("cpu_memory_access_la_RMW_d(): out of range (remain = %d)\n", remain); | |
| return (UINT32)-1; | |
| } | |
| } | |
| return value; | |
| } | |
| UINT8 MEMCALL | |
| cpu_linear_memory_read_b(UINT32 laddr, const int ucrw) | |
| { | |
| UINT32 paddr; | |
| paddr = paging(laddr, ucrw); | |
| return cpu_memoryread(paddr); | |
| } | |
| UINT16 MEMCALL | |
| cpu_linear_memory_read_w(UINT32 laddr, const int ucrw) | |
| { | |
| UINT32 paddr[2]; | |
| UINT16 value; | |
| paddr[0] = paging(laddr, ucrw); | |
| if ((laddr + 1) & PAGE_MASK) { | |
| return cpu_memoryread_w(paddr[0]); | |
| } else { | |
| paddr[1] = paging(laddr + 1, ucrw); | |
| value = cpu_memoryread_b(paddr[0]); | |
| value += (UINT16)cpu_memoryread_b(paddr[1]) << 8; | |
| return value; | |
| } | |
| } | |
| UINT32 MEMCALL | |
| cpu_linear_memory_read_d(UINT32 laddr, const int ucrw) | |
| { | |
| UINT32 paddr[2]; | |
| UINT32 value; | |
| UINT remain; | |
| paddr[0] = paging(laddr, ucrw); | |
| remain = PAGE_SIZE - (laddr & PAGE_MASK); | |
| if (remain >= 4) { | |
| return cpu_memoryread_d(paddr[0]); | |
| } else { | |
| paddr[1] = paging(laddr + remain, ucrw); | |
| switch (remain) { | |
| case 3: | case 3: |
| value |= (DWORD)cpu_memoryread(paddr) << shift; | value = cpu_memoryread(paddr[0]); |
| shift += 8; | value += (UINT32)cpu_memoryread_w(paddr[0] + 1) << 8; |
| paddr++; | value += (UINT32)cpu_memoryread(paddr[1]) << 24; |
| /*FALLTHROUGH*/ | break; |
| case 2: | case 2: |
| value |= (DWORD)cpu_memoryread_w(paddr) << shift; | value = cpu_memoryread_w(paddr[0]); |
| shift += 16; | value += (UINT32)cpu_memoryread_w(paddr[1]) << 16; |
| break; | break; |
| case 1: | case 1: |
| value |= (DWORD)cpu_memoryread(paddr) << shift; | value = cpu_memoryread(paddr[0]); |
| shift += 8; | value += (UINT32)cpu_memoryread_w(paddr[1]) << 8; |
| value += (UINT32)cpu_memoryread(paddr[1] + 2) << 24; | |
| break; | break; |
| default: | default: |
| ia32_panic("cpu_linear_memory_read(): out of range (r = %d)\n", r); | ia32_panic("cpu_linear_memory_read_d(): out of range (remain = %d)\n", remain); |
| value = (UINT32)-1; | |
| break; | break; |
| } | } |
| return value; | |
| } | |
| } | |
| length -= r; | UINT64 MEMCALL |
| if (length == 0) | cpu_linear_memory_read_q(UINT32 laddr, const int ucrw) |
| { | |
| UINT32 paddr[2]; | |
| UINT64 value; | |
| UINT remain; | |
| paddr[0] = paging(laddr, ucrw); | |
| remain = PAGE_SIZE - (laddr & PAGE_MASK); | |
| if (remain >= 8) { | |
| return cpu_memoryread_q(paddr[0]); | |
| } else { | |
| paddr[1] = paging(laddr + remain, ucrw); | |
| switch (remain) { | |
| case 7: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT64)cpu_memoryread_w(paddr[0] + 1) << 8; | |
| value += (UINT64)cpu_memoryread_d(paddr[0] + 3) << 24; | |
| value += (UINT64)cpu_memoryread(paddr[1]) << 56; | |
| break; | |
| case 6: | |
| value = cpu_memoryread_w(paddr[0]); | |
| value += (UINT64)cpu_memoryread_d(paddr[0] + 2) << 16; | |
| value += (UINT64)cpu_memoryread_w(paddr[1]) << 48; | |
| break; | |
| case 5: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT64)cpu_memoryread_d(paddr[0] + 1) << 8; | |
| value += (UINT64)cpu_memoryread_w(paddr[1]) << 40; | |
| value += (UINT64)cpu_memoryread(paddr[1] + 2) << 56; | |
| break; | break; |
| laddr += r; | case 4: |
| remain -= r; | value = cpu_memoryread_d(paddr[0]); |
| if (remain <= 0) { | value += (UINT64)cpu_memoryread_d(paddr[1]) << 32; |
| /* next page */ | break; |
| remain += 0x1000; | |
| case 3: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT64)cpu_memoryread_w(paddr[0] + 1) << 8; | |
| value += (UINT64)cpu_memoryread_d(paddr[1]) << 24; | |
| value += (UINT64)cpu_memoryread(paddr[1] + 4) << 56; | |
| break; | |
| case 2: | |
| value = cpu_memoryread_w(paddr[0]); | |
| value += (UINT64)cpu_memoryread_d(paddr[1]) << 16; | |
| value += (UINT64)cpu_memoryread_w(paddr[1] + 4) << 48; | |
| break; | |
| case 1: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT64)cpu_memoryread_d(paddr[1]) << 8; | |
| value += (UINT64)cpu_memoryread_w(paddr[1] + 4) << 40; | |
| value += (UINT64)cpu_memoryread(paddr[1] + 6) << 56; | |
| break; | |
| default: | |
| ia32_panic("cpu_linear_memory_read_q(): out of range (remain = %d)\n", remain); | |
| value = (UINT64)-1; | |
| break; | |
| } | } |
| } | } |
| return value; | return value; |
| } | } |
| REG80 MEMCALL | |
| cpu_linear_memory_read_f(UINT32 laddr, const int ucrw) | |
| { | |
| UINT32 paddr[2]; | |
| REG80 value; | |
| UINT remain; | |
| UINT i, j; | |
| paddr[0] = paging(laddr, ucrw); | |
| remain = PAGE_SIZE - (laddr & PAGE_MASK); | |
| if (remain >= 10) { | |
| return cpu_memoryread_f(paddr[0]); | |
| } else { | |
| paddr[1] = paging(laddr + remain, ucrw); | |
| for (i = 0; i < remain; ++i) { | |
| value.b[i] = cpu_memoryread(paddr[0] + i); | |
| } | |
| for (j = 0; i < 10; ++i, ++j) { | |
| value.b[i] = cpu_memoryread(paddr[1] + j); | |
| } | |
| return value; | |
| } | |
| } | |
| void MEMCALL | void MEMCALL |
| cpu_linear_memory_write(DWORD laddr, DWORD value, DWORD length, int user_mode) | cpu_linear_memory_write_b(UINT32 laddr, UINT8 value, const int user_mode) |
| { | { |
| DWORD paddr; | const int ucrw = CPU_PAGE_WRITE_DATA|user_mode; |
| DWORD remain; /* page remain */ | UINT32 paddr; |
| DWORD r; | |
| int crw = (CPU_PAGE_WRITE|CPU_PAGE_DATA); | |
| /* XXX: 4MB pages... */ | paddr = paging(laddr, ucrw); |
| remain = 0x1000 - (laddr & 0x00000fff); | cpu_memorywrite(paddr, value); |
| for (;;) { | } |
| paddr = paging(laddr, crw, user_mode); | |
| r = (remain > length) ? length : remain; | void MEMCALL |
| switch (r) { | cpu_linear_memory_write_w(UINT32 laddr, UINT16 value, const int user_mode) |
| case 4: | { |
| cpu_memorywrite_d(paddr, value); | const int ucrw = CPU_PAGE_WRITE_DATA|user_mode; |
| break; | UINT32 paddr[2]; |
| paddr[0] = paging(laddr, ucrw); | |
| if ((laddr + 1) & PAGE_MASK) { | |
| cpu_memorywrite_w(paddr[0], value); | |
| } else { | |
| paddr[1] = paging(laddr + 1, ucrw); | |
| cpu_memorywrite(paddr[0], (UINT8)value); | |
| cpu_memorywrite(paddr[1], (UINT8)(value >> 8)); | |
| } | |
| } | |
| void MEMCALL | |
| cpu_linear_memory_write_d(UINT32 laddr, UINT32 value, const int user_mode) | |
| { | |
| const int ucrw = CPU_PAGE_WRITE_DATA|user_mode; | |
| UINT32 paddr[2]; | |
| UINT remain; | |
| paddr[0] = paging(laddr, ucrw); | |
| remain = PAGE_SIZE - (laddr & PAGE_MASK); | |
| if (remain >= 4) { | |
| cpu_memorywrite_d(paddr[0], value); | |
| } else { | |
| paddr[1] = paging(laddr + remain, ucrw); | |
| switch (remain) { | |
| case 3: | case 3: |
| cpu_memorywrite(paddr, value & 0xff); | cpu_memorywrite(paddr[0], (UINT8)value); |
| value >>= 8; | cpu_memorywrite_w(paddr[0] + 1, (UINT16)(value >> 8)); |
| paddr++; | cpu_memorywrite(paddr[1], (UINT8)(value >> 24)); |
| /*FALLTHROUGH*/ | break; |
| case 2: | case 2: |
| cpu_memorywrite_w(paddr, value & 0xffff); | cpu_memorywrite_w(paddr[0], (UINT16)value); |
| value >>= 16; | cpu_memorywrite_w(paddr[1], (UINT16)(value >> 16)); |
| break; | break; |
| case 1: | case 1: |
| cpu_memorywrite(paddr, value & 0xff); | cpu_memorywrite(paddr[0], (UINT8)value); |
| value >>= 8; | cpu_memorywrite_w(paddr[1], (UINT16)(value >> 8)); |
| cpu_memorywrite(paddr[1] + 2, (UINT8)(value >> 24)); | |
| break; | break; |
| } | |
| } | |
| } | |
| default: | void MEMCALL |
| ia32_panic("cpu_linear_memory_write(): out of range (r = %d)\n", r); | cpu_linear_memory_write_q(UINT32 laddr, UINT64 value, const int user_mode) |
| { | |
| const int ucrw = CPU_PAGE_WRITE_DATA|user_mode; | |
| UINT32 paddr[2]; | |
| UINT remain; | |
| paddr[0] = paging(laddr, ucrw); | |
| remain = PAGE_SIZE - (laddr & PAGE_MASK); | |
| if (remain >= 8) { | |
| cpu_memorywrite_q(paddr[0], value); | |
| } else { | |
| paddr[1] = paging(laddr + remain, ucrw); | |
| switch (remain) { | |
| case 7: | |
| cpu_memorywrite(paddr[0], (UINT8)value); | |
| cpu_memorywrite_w(paddr[0] + 1, (UINT16)(value >> 8)); | |
| cpu_memorywrite_d(paddr[0] + 3, (UINT32)(value >> 24)); | |
| cpu_memorywrite(paddr[1], (UINT8)(value >> 56)); | |
| break; | |
| case 6: | |
| cpu_memorywrite_w(paddr[0], (UINT16)value); | |
| cpu_memorywrite_d(paddr[0] + 2, (UINT32)(value >> 16)); | |
| cpu_memorywrite_w(paddr[1], (UINT16)(value >> 48)); | |
| break; | |
| case 5: | |
| cpu_memorywrite(paddr[0], (UINT8)value); | |
| cpu_memorywrite_d(paddr[0] + 1, (UINT32)(value >> 8)); | |
| cpu_memorywrite_w(paddr[1], (UINT16)(value >> 40)); | |
| cpu_memorywrite(paddr[1] + 2, (UINT8)(value >> 56)); | |
| break; | break; |
| } | |
| length -= r; | case 4: |
| if (length == 0) | cpu_memorywrite_d(paddr[0], (UINT32)value); |
| cpu_memorywrite_d(paddr[1], (UINT32)(value >> 32)); | |
| break; | break; |
| laddr += r; | case 3: |
| remain -= r; | cpu_memorywrite(paddr[0], (UINT8)value); |
| if (remain <= 0) { | cpu_memorywrite_w(paddr[0] + 1, (UINT16)(value >> 8)); |
| /* next page */ | cpu_memorywrite_d(paddr[1], (UINT32)(value >> 24)); |
| remain += 0x1000; | cpu_memorywrite(paddr[1] + 4, (UINT8)(value >> 56)); |
| break; | |
| case 2: | |
| cpu_memorywrite_w(paddr[0], (UINT16)value); | |
| cpu_memorywrite_d(paddr[1], (UINT32)(value >> 16)); | |
| cpu_memorywrite_w(paddr[1] + 4, (UINT16)(value >> 48)); | |
| break; | |
| case 1: | |
| cpu_memorywrite(paddr[0], (UINT8)value); | |
| cpu_memorywrite_d(paddr[1], (UINT32)(value >> 8)); | |
| cpu_memorywrite_w(paddr[1] + 4, (UINT16)(value >> 40)); | |
| cpu_memorywrite(paddr[1] + 6, (UINT8)(value >> 56)); | |
| break; | |
| } | } |
| } | } |
| } | } |
| void MEMCALL | void MEMCALL |
| paging_check(DWORD laddr, DWORD length, int crw, int user_mode) | cpu_linear_memory_write_f(UINT32 laddr, const REG80 *value, const int user_mode) |
| { | { |
| DWORD paddr; | const int ucrw = CPU_PAGE_WRITE_DATA|user_mode; |
| DWORD remain; /* page remain */ | UINT32 paddr[2]; |
| DWORD r; | UINT remain; |
| UINT i, j; | |
| paddr[0] = paging(laddr, ucrw); | |
| remain = PAGE_SIZE - (laddr & PAGE_MASK); | |
| if (remain >= 10) { | |
| cpu_memorywrite_f(paddr[0], value); | |
| } else { | |
| paddr[1] = paging(laddr + remain, ucrw); | |
| for (i = 0; i < remain; ++i) { | |
| cpu_memorywrite(paddr[0] + i, value->b[i]); | |
| } | |
| for (j = 0; i < 10; ++i, ++j) { | |
| cpu_memorywrite(paddr[1] + j, value->b[i]); | |
| } | |
| } | |
| } | |
| /* XXX: 4MB pages... */ | |
| remain = 0x1000 - (laddr & 0x00000fff); | void MEMCALL |
| cpu_memory_access_la_region(UINT32 laddr, UINT length, const int ucrw, UINT8 *data) | |
| { | |
| UINT32 paddr; | |
| UINT remain; /* page remain */ | |
| UINT r; | |
| if (length == 0) | |
| return; | |
| remain = PAGE_SIZE - (laddr & PAGE_MASK); | |
| for (;;) { | for (;;) { |
| paddr = paging(laddr, crw, user_mode); | if (!CPU_STAT_PAGING) { |
| paddr = laddr; | |
| } else { | |
| paddr = paging(laddr, ucrw); | |
| } | |
| r = (remain > length) ? length : remain; | r = (remain > length) ? length : remain; |
| if (!(ucrw & CPU_PAGE_WRITE)) { | |
| cpu_memoryread_region(paddr, data, r); | |
| } else { | |
| cpu_memorywrite_region(paddr, data, r); | |
| } | |
| length -= r; | length -= r; |
| if (length == 0) | if (length == 0) |
| break; | break; |
| data += r; | |
| laddr += r; | laddr += r; |
| remain -= r; | remain -= r; |
| if (remain <= 0) { | if (remain <= 0) { |
| /* next page */ | /* next page */ |
| remain += 0x1000; | remain += PAGE_SIZE; |
| } | } |
| } | } |
| } | } |
| static DWORD | UINT32 MEMCALL |
| paging(DWORD laddr, int crw, int user_mode) | laddr2paddr(const UINT32 laddr, const int ucrw) |
| { | { |
| DWORD paddr; /* physical address */ | |
| DWORD pde_addr; /* page directory entry address */ | |
| DWORD pde; /* page directory entry */ | |
| DWORD pte_addr; /* page table entry address */ | |
| DWORD pte; /* page table entry */ | |
| DWORD bit; | |
| DWORD err; | |
| return paging(laddr, ucrw); | |
| } | |
| static UINT32 MEMCALL | |
| paging(const UINT32 laddr, const int ucrw) | |
| { | |
| UINT32 paddr; /* physical address */ | |
| UINT32 pde_addr; /* page directory entry address */ | |
| UINT32 pde; /* page directory entry */ | |
| UINT32 pte_addr; /* page table entry address */ | |
| UINT32 pte; /* page table entry */ | |
| UINT bit; | |
| UINT err; | |
| #if defined(IA32_SUPPORT_TLB) | #if defined(IA32_SUPPORT_TLB) |
| if (tlb_lookup(laddr, crw, &paddr)) | TLB_ENTRY_T *ep; |
| return paddr; | |
| #endif /* IA32_SUPPORT_TLB */ | ep = tlb_lookup(laddr, ucrw); |
| if (ep != NULL) | |
| return ep->paddr + (laddr & PAGE_MASK); | |
| #endif | |
| pde_addr = CPU_STAT_PDE_BASE | ((laddr >> 20) & 0xffc); | pde_addr = CPU_STAT_PDE_BASE + ((laddr >> 20) & 0xffc); |
| pde = cpu_memoryread_d(pde_addr); | pde = cpu_memoryread_d(pde_addr); |
| if (!(pde & CPU_PDE_PRESENT)) { | if (!(pde & CPU_PDE_PRESENT)) { |
| VERBOSE(("paging: PDE is not present")); | VERBOSE(("paging: PTE page is not present")); |
| VERBOSE(("paging: CPU_CR3 = 0x%08x", CPU_CR3)); | VERBOSE(("paging: CPU_CR3 = 0x%08x", CPU_CR3)); |
| VERBOSE(("paging: laddr = 0x%08x, pde_addr = 0x%08x, pde = 0x%08x", laddr, pde_addr, pde)); | VERBOSE(("paging: laddr = 0x%08x, pde_addr = 0x%08x, pde = 0x%08x", laddr, pde_addr, pde)); |
| err = 0; | err = 0; |
| Line 355 paging(DWORD laddr, int crw, int user_mo | Line 663 paging(DWORD laddr, int crw, int user_mo |
| cpu_memorywrite_d(pde_addr, pde); | cpu_memorywrite_d(pde_addr, pde); |
| } | } |
| #if CPU_FAMILY >= 5 | pte_addr = (pde & CPU_PDE_BASEADDR_MASK) + ((laddr >> 10) & 0xffc); |
| /* no support PAE */ | pte = cpu_memoryread_d(pte_addr); |
| __ASSERT(!(CPU_CR4 & CPU_CR4_PAE)); | if (!(pte & CPU_PTE_PRESENT)) { |
| VERBOSE(("paging: page is not present")); | |
| if ((CPU_CR4 & CPU_CR4_PSE) && (pde & CPU_PDE_PAGE_SIZE)) { | VERBOSE(("paging: laddr = 0x%08x, pde_addr = 0x%08x, pde = 0x%08x", laddr, pde_addr, pde)); |
| /* 4MB page size */ | VERBOSE(("paging: pte_addr = 0x%08x, pte = 0x%08x", pte_addr, pte)); |
| err = 0; | |
| /* fake PTE bit */ | goto pf_exception; |
| pte = pde | CPU_PTE_DIRTY; | } |
| pte_addr = 0; /* compiler happy */ | if (!(pte & CPU_PTE_ACCESS)) { |
| pte |= CPU_PTE_ACCESS; | |
| /* make physical address */ | cpu_memorywrite_d(pte_addr, pte); |
| paddr = (pde & CPU_PDE_4M_BASEADDR_MASK) | (laddr & 0x003fffff); | |
| } else | |
| #endif /* CPU_FAMILY >= 5 */ | |
| { | |
| /* 4KB page size */ | |
| pte_addr = (pde & CPU_PDE_BASEADDR_MASK) | ((laddr >> 10) & 0xffc); | |
| pte = cpu_memoryread_d(pte_addr); | |
| if (!(pte & CPU_PTE_PRESENT)) { | |
| VERBOSE(("paging: PTE is not present")); | |
| VERBOSE(("paging: laddr = 0x%08x, pde_addr = 0x%08x, pde = 0x%08x", laddr, pde_addr, pde)); | |
| VERBOSE(("paging: pte_addr = 0x%08x, pte = 0x%08x", pte_addr, pte)); | |
| err = 0; | |
| goto pf_exception; | |
| } | |
| if (!(pte & CPU_PTE_ACCESS)) { | |
| pte |= CPU_PTE_ACCESS; | |
| cpu_memorywrite_d(pte_addr, pte); | |
| } | |
| /* make physical address */ | |
| paddr = (pte & CPU_PTE_BASEADDR_MASK) | (laddr & 0x00000fff); | |
| } | } |
| bit = crw & CPU_PAGE_WRITE; | /* make physical address */ |
| paddr = (pte & CPU_PTE_BASEADDR_MASK) + (laddr & PAGE_MASK); | |
| bit = ucrw & (CPU_PAGE_WRITE|CPU_PAGE_USER_MODE); | |
| bit |= (pde & pte & (CPU_PTE_WRITABLE|CPU_PTE_USER_MODE)); | bit |= (pde & pte & (CPU_PTE_WRITABLE|CPU_PTE_USER_MODE)); |
| bit |= (user_mode << 3); | bit |= CPU_STAT_WP; |
| bit |= (CPU_CR0 & CPU_CR0_WP) >> 12; | |
| #if !defined(USE_PAGE_ACCESS_TABLE) | #if !defined(USE_PAGE_ACCESS_TABLE) |
| if (!(page_access & (1 << bit))) | if (!(page_access & (1 << bit))) |
| Line 409 paging(DWORD laddr, int crw, int user_mo | Line 698 paging(DWORD laddr, int crw, int user_mo |
| goto pf_exception; | goto pf_exception; |
| } | } |
| if ((crw & CPU_PAGE_WRITE) && !(pte & CPU_PTE_DIRTY)) { | if ((ucrw & CPU_PAGE_WRITE) && !(pte & CPU_PTE_DIRTY)) { |
| pte |= CPU_PTE_DIRTY; | pte |= CPU_PTE_DIRTY; |
| cpu_memorywrite_d(pte_addr, pte); | cpu_memorywrite_d(pte_addr, pte); |
| } | } |
| #if defined(IA32_SUPPORT_TLB) | #if defined(IA32_SUPPORT_TLB) |
| tlb_update(paddr, pte, crw); | tlb_update(laddr, pte, (bit & (CPU_PTE_WRITABLE|CPU_PTE_USER_MODE)) + ((ucrw & CPU_PAGE_CODE) >> 1)); |
| #endif /* IA32_SUPPORT_TLB */ | #endif |
| return paddr; | return paddr; |
| pf_exception: | pf_exception: |
| CPU_CR2 = laddr; | CPU_CR2 = laddr; |
| err |= ((crw & CPU_PAGE_WRITE) << 1) | (user_mode << 2); | err |= (ucrw & CPU_PAGE_WRITE) << 1; |
| err |= (ucrw & CPU_PAGE_USER_MODE) >> 1; | |
| EXCEPTION(PF_EXCEPTION, err); | EXCEPTION(PF_EXCEPTION, err); |
| return 0; /* compiler happy */ | return 0; /* compiler happy */ |
| } | } |
| #if defined(IA32_SUPPORT_TLB) | #if defined(IA32_SUPPORT_TLB) |
| /* | /* |
| * TLB | * TLB |
| */ | */ |
| typedef struct { | #define TLB_GET_PADDR(ep, addr) ((ep)->paddr + ((addr) & ~CPU_PTE_BASEADDR_MASK)) |
| BYTE valid; /* TLB entry is valid */ | #define TLB_SET_PADDR(ep, addr) ((ep)->paddr = (addr) & CPU_PTE_BASEADDR_MASK) |
| BYTE global; /* this TLB entry is global */ | |
| BYTE score; | |
| BYTE pad; | |
| DWORD tag; | #define TLB_TAG_SHIFT TLB_ENTRY_TAG_MAX_SHIFT |
| DWORD mask; /* 4K or 2M or 4M */ | #define TLB_TAG_MASK (~((1 << TLB_TAG_SHIFT) - 1)) |
| #define TLB_GET_TAG_ADDR(ep) ((ep)->tag & TLB_TAG_MASK) | |
| #define TLB_SET_TAG_ADDR(ep, addr) \ | |
| do { \ | |
| (ep)->tag &= ~TLB_TAG_MASK; \ | |
| (ep)->tag |= (addr) & TLB_TAG_MASK; \ | |
| } while (/*CONSTCOND*/ 0) |
| #define TLB_IS_VALID(ep) ((ep)->tag & TLB_ENTRY_TAG_VALID) | |
| #define TLB_SET_VALID(ep) ((ep)->tag = TLB_ENTRY_TAG_VALID) | |
| #define TLB_SET_INVALID(ep) ((ep)->tag = 0) | |
| #define TLB_IS_WRITABLE(ep) ((ep)->tag & CPU_PTE_WRITABLE) | |
| #define TLB_IS_USERMODE(ep) ((ep)->tag & CPU_PTE_USER_MODE) | |
| #define TLB_IS_DIRTY(ep) ((ep)->tag & TLB_ENTRY_TAG_DIRTY) | |
| #if (CPU_FEATURES & CPU_FEATURE_PGE) == CPU_FEATURE_PGE | |
| #define TLB_IS_GLOBAL(ep) ((ep)->tag & TLB_ENTRY_TAG_GLOBAL) | |
| #else | |
| #define TLB_IS_GLOBAL(ep) 0 | |
| #endif | |
| DWORD paddr; /* physical addr */ | #define TLB_SET_TAG_FLAGS(ep, entry, bit) \ |
| } TLB_ENTRY_T; | do { \ |
| (ep)->tag |= (entry) & (CPU_PTE_GLOBAL_PAGE|CPU_PTE_DIRTY); \ | |
| (ep)->tag |= (bit) & (CPU_PTE_WRITABLE|CPU_PTE_USER_MODE); \ | |
| } while (/*CONSTCOND*/ 0) | |
| #define NTLB 2 /* 0: DTLB, 1: ITLB */ | |
| #define NENTRY (1 << 6) | |
| #define TLB_ENTRY_SHIFT 12 | |
| #define TLB_ENTRY_MASK (NENTRY - 1) | |
| typedef struct { | typedef struct { |
| BYTE kind; | TLB_ENTRY_T entry[NENTRY]; |
| #define TLB_KIND_INSTRUCTION (1 << 1) | |
| #define TLB_KIND_DATA (1 << 2) | |
| #define TLB_KIND_COMBINE (TLB_KIND_INSTRUCTION|TLB_KIND_DATA) | |
| #define TLB_KIND_SMALL (1 << 3) | |
| #define TLB_KIND_LARGE (1 << 4) | |
| #define TLB_KIND_BOTH (TLB_KIND_SMALL|TLB_KIND_LARGE) | |
| BYTE way; /* n-way associative */ | |
| BYTE idx; /* number of TLB index */ | |
| BYTE bpad; | |
| WORD num; /* number of TLB entry */ | |
| WORD wpad; | |
| TLB_ENTRY_T* entry; /* entry[assoc][idx] or entry[assoc] if idx == 1*/ | |
| } TLB_T; | } TLB_T; |
| static int ntlb; | static TLB_T tlb[NTLB]; |
| static TLB_T tlb[4]; /* i TLB, i (lp) TLB, d TLB, d (lp) TLB */ | |
| #if defined(IA32_PROFILE_TLB) | #if defined(IA32_PROFILE_TLB) |
| /* profiling */ | /* profiling */ |
| static DWORD tlb_hits; | typedef struct { |
| static DWORD tlb_misses; | UINT64 tlb_hits; |
| static DWORD tlb_lookups; | UINT64 tlb_misses; |
| static DWORD tlb_updates; | UINT64 tlb_lookups; |
| static DWORD tlb_flushes; | UINT64 tlb_updates; |
| static DWORD tlb_global_flushes; | UINT64 tlb_flushes; |
| static DWORD tlb_entry_flushes; | UINT64 tlb_global_flushes; |
| UINT64 tlb_entry_flushes; | |
| } TLB_PROFILE_T; | |
| #define PROFILE_INC(v) (v)++; | static TLB_PROFILE_T tlb_profile; |
| #define PROFILE_INC(v) tlb_profile.v++ | |
| #else /* !IA32_PROFILE_TLB */ | #else /* !IA32_PROFILE_TLB */ |
| #define PROFILE_INC(v) | #define PROFILE_INC(v) |
| #endif /* IA32_PROFILE_TLB */ | #endif /* IA32_PROFILE_TLB */ |
| void | void |
| tlb_init() | tlb_init(void) |
| { | { |
| int i; | |
| for (i = 0; i < NELEMENTS(tlb); i++) { | |
| if (tlb[i].entry) { | |
| free(tlb[i].entry); | |
| } | |
| } | |
| memset(tlb, 0, sizeof(tlb)); | memset(tlb, 0, sizeof(tlb)); |
| #if defined(IA32_PROFILE_TLB) | #if defined(IA32_PROFILE_TLB) |
| tlb_hits = 0; | memset(&tlb_profile, 0, sizeof(tlb_profile)); |
| tlb_misses = 0; | |
| tlb_lookups = 0; | |
| tlb_updates = 0; | |
| tlb_flushes = 0; | |
| tlb_global_flushes = 0; | |
| tlb_entry_flushes = 0; | |
| #endif /* IA32_PROFILE_TLB */ | #endif /* IA32_PROFILE_TLB */ |
| #if CPU_FAMILY == 4 | |
| /* use the i486 layout for now… */ | |
| /* combine (I/D) TLB: 4KB Pages, 4-way set associative 32 entries */ | |
| ntlb = 1; | |
| tlb[0].kind = TLB_KIND_COMBINE | TLB_KIND_SMALL; | |
| tlb[0].num = 32; | |
| tlb[0].way = 4; | |
| #endif | |
| for (i = 0; i < ntlb; i++) { | |
| tlb[i].idx = tlb[i].num / tlb[i].way; | |
| tlb[i].entry = (TLB_ENTRY_T*)calloc(sizeof(TLB_ENTRY_T), tlb[i].num); | |
| if (tlb[i].entry == 0) { | |
| ia32_panic("tlb_init(): can't alloc TLB entry\n"); | |
| } | |
| } | |
| } | } |
| void | void MEMCALL |
| tlb_flush(BOOL allflush) | tlb_flush(BOOL allflush) |
| { | { |
| TLB_ENTRY_T* ep; | TLB_ENTRY_T *ep; |
| int i, j; | int i; |
| int n; | |
| if (allflush) { | if (allflush) { |
| PROFILE_INC(tlb_global_flushes); | PROFILE_INC(tlb_global_flushes); |
| Line 533 tlb_flush(BOOL allflush) | Line 806 tlb_flush(BOOL allflush) |
| PROFILE_INC(tlb_flushes); | PROFILE_INC(tlb_flushes); |
| } | } |
| for (i = 0; i < ntlb; i++) { | for (n = 0; n < NTLB; n++) { |
| ep = tlb[i].entry; | for (i = 0; i < NENTRY ; i++) { |
| for (j = 0; j < tlb[i].num; j++, ep++) { | ep = &tlb[n].entry[i]; |
| if (ep->valid && (allflush || !ep->global)) { | if (TLB_IS_VALID(ep) && (allflush || !TLB_IS_GLOBAL(ep))) { |
| ep->valid = 0; | TLB_SET_INVALID(ep); |
| PROFILE_INC(tlb_entry_flushes); | PROFILE_INC(tlb_entry_flushes); |
| } | } |
| } | } |
| } | } |
| } | } |
| void | void MEMCALL |
| tlb_flush_page(DWORD vaddr) | tlb_flush_page(UINT32 laddr) |
| { | { |
| TLB_ENTRY_T* ep; | TLB_ENTRY_T *ep; |
| int idx; | int idx; |
| int i; | int n; |
| for (i = 0; i < ntlb; i++) { | PROFILE_INC(tlb_flushes); |
| if (tlb[i].idx == 1) { | |
| /* fully set associative */ | |
| idx = 0; | |
| } else { | |
| if (tlb[i].kind & TLB_KIND_SMALL) { | |
| idx = (vaddr >> 12) & (tlb[i].idx - 1); | |
| } else { | |
| idx = (vaddr >> 22) & (tlb[i].idx - 1); | |
| } | |
| } | |
| /* search */ | idx = (laddr >> TLB_ENTRY_SHIFT) & TLB_ENTRY_MASK; |
| ep = &tlb[i].entry[idx * tlb[i].way]; | |
| for (i = 0; i < tlb[i].way; i++) { | for (n = 0; n < NTLB; n++) { |
| if (ep->valid) { | ep = &tlb[n].entry[idx]; |
| if ((vaddr & ep->mask) == ep->tag) { | if (TLB_IS_VALID(ep)) { |
| ep->valid = 0; | if ((laddr & TLB_TAG_MASK) == TLB_GET_TAG_ADDR(ep)) { |
| PROFILE_INC(tlb_entry_flushes); | TLB_SET_INVALID(ep); |
| break; | PROFILE_INC(tlb_entry_flushes); |
| } | |
| } | } |
| } | } |
| } | } |
| } | } |
| static BOOL | TLB_ENTRY_T * MEMCALL |
| tlb_lookup(DWORD laddr, int crw, DWORD* paddr) | tlb_lookup(const UINT32 laddr, const int ucrw) |
| { | { |
| TLB_ENTRY_T* ep; | TLB_ENTRY_T *ep; |
| UINT bit; | |
| int idx; | int idx; |
| int i; | int n; |
| PROFILE_INC(tlb_lookups); | PROFILE_INC(tlb_lookups); |
| crw &= CPU_PAGE_CODE | CPU_PAGE_DATA; | n = (ucrw & CPU_PAGE_CODE) >> 1; |
| for (i = 0; i < ntlb; i++) { | idx = (laddr >> TLB_ENTRY_SHIFT) & TLB_ENTRY_MASK; |
| if (tlb[i].kind & crw) { | ep = &tlb[n].entry[idx]; |
| if (tlb[i].idx == 1) { | |
| /* fully set associative */ | if (TLB_IS_VALID(ep)) { |
| idx = 0; | if ((laddr & TLB_TAG_MASK) == TLB_GET_TAG_ADDR(ep)) { |
| } else { | bit = ucrw & (CPU_PAGE_WRITE|CPU_PAGE_USER_MODE); |
| if (tlb[i].kind & TLB_KIND_SMALL) { | bit |= ep->tag & (CPU_PTE_WRITABLE|CPU_PTE_USER_MODE); |
| idx = (laddr >> 12) & (tlb[i].idx - 1); | bit |= CPU_STAT_WP; |
| } else { | #if !defined(USE_PAGE_ACCESS_TABLE) |
| idx = (laddr >> 22) & (tlb[i].idx - 1); | if ((page_access & (1 << bit))) |
| } | #else |
| } | if (page_access_bit[bit]) |
| #endif | |
| /* search */ | { |
| ep = &tlb[i].entry[idx * tlb[i].way]; | if (!(ucrw & CPU_PAGE_WRITE) || TLB_IS_DIRTY(ep)) { |
| for (i = 0; i < tlb[i].way; i++) { | PROFILE_INC(tlb_hits); |
| if (ep->valid) { | return ep; |
| if ((laddr & ep->mask) == ep->tag) { | |
| if (ep->score != (BYTE)~0) | |
| ep->score++; | |
| *paddr = ep->paddr; | |
| PROFILE_INC(tlb_hits); | |
| return TRUE; | |
| } | |
| } | } |
| } | } |
| } | } |
| } | } |
| PROFILE_INC(tlb_misses); | PROFILE_INC(tlb_misses); |
| return FALSE; | return NULL; |
| } | } |
| static void | static void MEMCALL |
| tlb_update(DWORD paddr, DWORD entry, int crw) | tlb_update(const UINT32 laddr, const UINT entry, const int bit) |
| { | { |
| TLB_ENTRY_T* ep; | TLB_ENTRY_T *ep; |
| UINT32 pos; | |
| int idx; | int idx; |
| int i, j; | int n; |
| int min_way; | |
| WORD min_score = ~0; | |
| PROFILE_INC(tlb_updates); | PROFILE_INC(tlb_updates); |
| crw &= CPU_PAGE_CODE | CPU_PAGE_DATA; | n = bit & 1; |
| for (i = 0; i < ntlb; i++) { | idx = (laddr >> TLB_ENTRY_SHIFT) & TLB_ENTRY_MASK; |
| if (tlb[i].kind & crw) { | ep = &tlb[n].entry[idx]; |
| if (tlb[i].idx == 1) { | |
| /* fully set associative */ | TLB_SET_VALID(ep); |
| idx = 0; | TLB_SET_TAG_ADDR(ep, laddr); |
| } else { | TLB_SET_PADDR(ep, entry); |
| /* n-way set associative */ | TLB_SET_TAG_FLAGS(ep, entry, bit); |
| if (!(entry & CPU_PDE_PAGE_SIZE)) { | |
| if (!(tlb[i].kind & TLB_KIND_SMALL)) | if (ep->paddr < CPU_MEMREADMAX) { |
| continue; | ep->memp = mem + ep->paddr; |
| idx = (entry >> 12) & (tlb[i].idx - 1); | return; |
| } else { | } else if (ep->paddr >= USE_HIMEM) { |
| if (!(tlb[i].kind & TLB_KIND_LARGE)) | pos = (ep->paddr & CPU_ADRSMASK) - 0x100000; |
| continue; | if (pos < CPU_EXTMEMSIZE) { |
| idx = (entry >> 22) & (tlb[i].idx - 1); | ep->memp = CPU_EXTMEM + pos; |
| } | return; |
| } | |
| /* search */ | |
| ep = &tlb[i].entry[idx * tlb[i].way]; | |
| for (min_way = 0, j = 0; j < tlb[i].way; j++, ep++) { | |
| if (ep->valid) { | |
| if (min_score >= ep->score) { | |
| min_way = j; | |
| min_score = ep->score; | |
| } | |
| } else { | |
| min_way = j; | |
| min_score = 0; | |
| break; | |
| } | |
| } | |
| /* replace */ | |
| ep = &tlb[i].entry[idx * tlb[i].way + min_way]; | |
| ep->valid = 1; | |
| ep->global = (entry & CPU_PTE_GLOBAL_PAGE) ? 1 : 0; | |
| ep->score = 0; | |
| ep->mask = (entry & CPU_PDE_PAGE_SIZE) ? CPU_PDE_4M_BASEADDR_MASK : CPU_PTE_BASEADDR_MASK; | |
| ep->tag = entry & ep->mask; | |
| ep->paddr = paddr; | |
| break; | |
| } | } |
| } | } |
| __ASSERT(i != ntlb); | ep->memp = NULL; |
| } | } |
| #endif /* IA32_SUPPORT_TLB */ | #endif /* IA32_SUPPORT_TLB */ |
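
The per-width access helpers introduced in revision 1.30 all follow the same cross-page pattern: translate the linear address once, and if the access would run past the 4KB page boundary, translate the start of the next page separately and stitch the value together from sub-accesses, since the two pages may map to unrelated physical frames. Below is a minimal standalone sketch of that pattern for a 32-bit read; the flat `mem[]` array, the identity-mapping `translate()` stand-in for `paging()`, and the helper names are assumptions made purely for illustration, not code from the emulator.

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000u
#define PAGE_MASK (PAGE_SIZE - 1u)

static uint8_t mem[2 * PAGE_SIZE];          /* toy backing store (assumption) */

/* stand-in for paging(): identity mapping, no access checks or faults */
static uint32_t translate(uint32_t laddr) { return laddr; }

/* little-endian 32-bit load following the split logic of
 * cpu_linear_memory_read_d(), simplified to a byte loop */
static uint32_t read_d(uint32_t laddr)
{
	uint32_t remain = PAGE_SIZE - (laddr & PAGE_MASK); /* bytes left in this page */
	uint32_t p0 = translate(laddr);
	uint32_t value = 0;
	uint32_t i;

	if (remain >= 4) {
		/* fast path: the whole dword lies in one page */
		for (i = 0; i < 4; i++)
			value |= (uint32_t)mem[p0 + i] << (8 * i);
		return value;
	}

	/* slow path: the tail may live in a differently mapped page,
	 * so the second page is translated on its own */
	{
		uint32_t p1 = translate(laddr + remain);
		for (i = 0; i < 4; i++) {
			uint32_t p = (i < remain) ? p0 + i : p1 + (i - remain);
			value |= (uint32_t)mem[p] << (8 * i);
		}
	}
	return value;
}

int main(void)
{
	uint32_t i;

	for (i = 0; i < sizeof(mem); i++)
		mem[i] = (uint8_t)i;

	/* dword straddling the 0x1000 page boundary: prints 0x0100fffe */
	printf("0x%08x\n", (unsigned)read_d(PAGE_SIZE - 2));
	return 0;
}
```

The `_w`, `_q`, and `_f` variants in the diff are the same idea at other widths; their switch statements simply pick byte/word/dword sub-accesses instead of looping over single bytes.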