| version 1.17, 2004/03/23 18:34:05 | version 1.31, 2011/01/15 17:17:23 |
|---|---|
| Line 1 | Line 1 |
| /* $Id$ */ | |
| /* | /* |
| * Copyright (c) 2003-2004 NONAKA Kimihiro | * Copyright (c) 2003-2004 NONAKA Kimihiro |
| * All rights reserved. | * All rights reserved. |
| Line 12 | Line 10 |
| * 2. Redistributions in binary form must reproduce the above copyright | * 2. Redistributions in binary form must reproduce the above copyright |
| * notice, this list of conditions and the following disclaimer in the | * notice, this list of conditions and the following disclaimer in the |
| * documentation and/or other materials provided with the distribution. | * documentation and/or other materials provided with the distribution. |
| * 3. The name of the author may not be used to endorse or promote products | |
| * derived from this software without specific prior written permission. | |
| * | * |
| * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| Line 184 static const UINT8 page_access_bit[32] = | Line 180 static const UINT8 page_access_bit[32] = |
| * +- CR3 (physical address) | * +- CR3 (physical address) |
| */ | */ |
| static UINT32 paging(const UINT32 laddr, const int crw, const int user_mode); | static UINT32 MEMCALL paging(const UINT32 laddr, const int ucrw); |
| #if defined(IA32_SUPPORT_TLB) | #if defined(IA32_SUPPORT_TLB) |
| static BOOL tlb_lookup(const UINT32 vaddr, const int crw, UINT32 *paddr); | static void MEMCALL tlb_update(const UINT32 laddr, const UINT entry, const int ucrw); |
| static void tlb_update(const UINT32 laddr, const UINT entry, const int crw); | |
| #endif | #endif |
| #define PAGE_SIZE 0x1000 | |
| #define PAGE_MASK (PAGE_SIZE - 1) | |
| void MEMCALL | UINT8 MEMCALL |
| cpu_memory_access_la_region(UINT32 laddr, UINT length, const int crw, const int user_mode, BYTE *data) | cpu_memory_access_la_RMW_b(UINT32 laddr, UINT32 (*func)(UINT32, void *), void *arg) |
| { | { |
| const int ucrw = CPU_PAGE_WRITE_DATA|CPU_STAT_USER_MODE; | |
| UINT32 result, value; | |
| UINT32 paddr; | UINT32 paddr; |
| UINT remain; /* page remain */ | |
| UINT r; | |
| if (length == 0) | paddr = paging(laddr, ucrw); |
| return; | value = cpu_memoryread(paddr); |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr, (UINT8)result); | |
| remain = 0x1000 - (laddr & 0x00000fff); | return value; |
| for (;;) { | } |
| if (!CPU_STAT_PAGING) { | |
| paddr = laddr; | |
| } else { | |
| paddr = paging(laddr, crw, user_mode); | |
| } | |
| r = (remain > length) ? length : remain; | |
| if (!(crw & CPU_PAGE_WRITE)) { | |
| cpu_memoryread_region(paddr, data, r); | |
| } else { | |
| cpu_memorywrite_region(paddr, data, r); | |
| } | |
| length -= r; | UINT16 MEMCALL |
| if (length == 0) | cpu_memory_access_la_RMW_w(UINT32 laddr, UINT32 (*func)(UINT32, void *), void *arg) |
| break; | { |
| const int ucrw = CPU_PAGE_WRITE_DATA|CPU_STAT_USER_MODE; | |
| UINT32 result, value; | |
| UINT32 paddr[2]; | |
| data += r; | paddr[0] = paging(laddr, ucrw); |
| laddr += r; | if ((laddr + 1) & PAGE_MASK) { |
| remain -= r; | value = cpu_memoryread_w(paddr[0]); |
| if (remain <= 0) { | result = (*func)(value, arg); |
| /* next page */ | cpu_memorywrite_w(paddr[0], (UINT16)result); |
| remain += 0x1000; | } else { |
| } | paddr[1] = paging(laddr + 1, ucrw); |
| value = cpu_memoryread_b(paddr[0]); | |
| value += (UINT16)cpu_memoryread_b(paddr[1]) << 8; | |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr[0], (UINT8)result); | |
| cpu_memorywrite(paddr[1], (UINT8)(result >> 8)); | |
| } | } |
| return value; | |
| } | } |
| UINT32 MEMCALL | UINT32 MEMCALL |
| cpu_memory_access_la_RMW(UINT32 laddr, UINT length, const int user_mode, UINT32 (*func)(UINT32, void *), void *arg) | cpu_memory_access_la_RMW_d(UINT32 laddr, UINT32 (*func)(UINT32, void *), void *arg) |
| { | { |
| const int crw = (CPU_PAGE_WRITE|CPU_PAGE_DATA); | const int ucrw = CPU_PAGE_WRITE_DATA|CPU_STAT_USER_MODE; |
| UINT32 result, value; | UINT32 result, value; |
| UINT32 paddr[2]; | UINT32 paddr[2]; |
| UINT remain; | UINT remain; |
| paddr[0] = paging(laddr, crw, user_mode); | paddr[0] = paging(laddr, ucrw); |
| remain = 0x1000 - (laddr & 0x00000fff); | remain = PAGE_SIZE - (laddr & PAGE_MASK); |
| if (remain >= length) { | if (remain >= 4) { |
| /* fast mode */ | value = cpu_memoryread_d(paddr[0]); |
| switch (length) { | result = (*func)(value, arg); |
| case 4: | cpu_memorywrite_d(paddr[0], result); |
| value = cpu_memoryread_d(paddr[0]); | } else { |
| paddr[1] = paging(laddr + remain, ucrw); | |
| switch (remain) { | |
| case 3: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[0] + 1) << 8; | |
| value += (UINT32)cpu_memoryread(paddr[1]) << 24; | |
| result = (*func)(value, arg); | result = (*func)(value, arg); |
| cpu_memorywrite_d(paddr[0], result); | cpu_memorywrite(paddr[0], (UINT8)result); |
| cpu_memorywrite_w(paddr[0] + 1, (UINT16)(result >> 8)); | |
| cpu_memorywrite(paddr[1], (UINT8)(result >> 24)); | |
| break; | break; |
| case 2: | case 2: |
| value = cpu_memoryread_w(paddr[0]); | value = cpu_memoryread_w(paddr[0]); |
| value += (UINT32)cpu_memoryread_w(paddr[1]) << 16; | |
| result = (*func)(value, arg); | result = (*func)(value, arg); |
| cpu_memorywrite_w(paddr[0], (UINT16)result); | cpu_memorywrite_w(paddr[0], (UINT16)result); |
| cpu_memorywrite_w(paddr[1], (UINT16)(result >> 16)); | |
| break; | break; |
| case 1: | case 1: |
| value = cpu_memoryread(paddr[0]); | value = cpu_memoryread(paddr[0]); |
| value += (UINT32)cpu_memoryread_w(paddr[1]) << 8; | |
| value += (UINT32)cpu_memoryread(paddr[1] + 2) << 24; | |
| result = (*func)(value, arg); | result = (*func)(value, arg); |
| cpu_memorywrite(paddr[0], (UINT8)result); | cpu_memorywrite(paddr[0], (UINT8)result); |
| cpu_memorywrite_w(paddr[1], (UINT16)(result >> 8)); | |
| cpu_memorywrite(paddr[1] + 2, (UINT8)(result >> 24)); | |
| break; | break; |
| default: | default: |
| ia32_panic("cpu_memory_access_la_RMW(): invalid length (length = %d)\n", length); | ia32_panic("cpu_memory_access_la_RMW_d(): out of range (remain = %d)\n", remain); |
| value = 0; /* compiler happy */ | return (UINT32)-1; |
| break; | |
| } | } |
| return value; | |
| } | } |
| return value; | |
| } | |
| /* slow mode */ | UINT8 MEMCALL |
| paddr[1] = paging(laddr + remain, crw, user_mode); | cpu_linear_memory_read_b(UINT32 laddr, const int ucrw) |
| switch (remain) { | { |
| case 3: | UINT32 paddr; |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[0] + 1) << 8; | |
| value += (UINT32)cpu_memoryread(paddr[1]) << 24; | |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr[0], (UINT8)result); | |
| cpu_memorywrite_w(paddr[0] + 1, (UINT16)(result >> 8)); | |
| cpu_memorywrite(paddr[1], (BYTE)(result >> 24)); | |
| break; | |
| case 2: | paddr = paging(laddr, ucrw); |
| value = cpu_memoryread_w(paddr[0]); | return cpu_memoryread(paddr); |
| value += (UINT32)cpu_memoryread_w(paddr[1]) << 16; | } |
| result = (*func)(value, arg); | |
| cpu_memorywrite_w(paddr[0], (UINT16)result); | |
| cpu_memorywrite_w(paddr[1], (UINT16)(result >> 16)); | |
| break; | |
| case 1: | UINT16 MEMCALL |
| value = cpu_memoryread(paddr[1]); | cpu_linear_memory_read_w(UINT32 laddr, const int ucrw) |
| value += (UINT32)cpu_memoryread(paddr[1]) << 8; | { |
| if (length == 4) { | UINT32 paddr[2]; |
| value += (UINT32)cpu_memoryread_w(paddr[1] + 1) << 16; | UINT16 value; |
| } | |
| result = (*func)(value, arg); | |
| cpu_memorywrite(paddr[0], (UINT8)result); | |
| cpu_memorywrite(paddr[1], (UINT8)(result >> 8)); | |
| if (length == 4) { | |
| cpu_memorywrite_w(paddr[1] + 1, (UINT16)(result >> 16)); | |
| } | |
| break; | |
| default: | paddr[0] = paging(laddr, ucrw); |
| ia32_panic("cpu_memory_access_la_RMW(): out of range (remain = %d)\n", remain); | if ((laddr + 1) & PAGE_MASK) { |
| value = 0; /* compiler happy */ | return cpu_memoryread_w(paddr[0]); |
| break; | } else { |
| paddr[1] = paging(laddr + 1, ucrw); | |
| value = cpu_memoryread_b(paddr[0]); | |
| value += (UINT16)cpu_memoryread_b(paddr[1]) << 8; | |
| return value; | |
| } | } |
| return value; | |
| } | } |
| UINT32 MEMCALL | UINT32 MEMCALL |
| cpu_linear_memory_read(UINT32 laddr, UINT length, const int crw, const int user_mode) | cpu_linear_memory_read_d(UINT32 laddr, const int ucrw) |
| { | { |
| UINT32 paddr[2]; | |
| UINT32 value; | UINT32 value; |
| UINT remain; | |
| paddr[0] = paging(laddr, ucrw); | |
| remain = PAGE_SIZE - (laddr & PAGE_MASK); | |
| if (remain >= 4) { | |
| return cpu_memoryread_d(paddr[0]); | |
| } else { | |
| paddr[1] = paging(laddr + remain, ucrw); | |
| switch (remain) { | |
| case 3: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[0] + 1) << 8; | |
| value += (UINT32)cpu_memoryread(paddr[1]) << 24; | |
| break; | |
| case 2: | |
| value = cpu_memoryread_w(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[1]) << 16; | |
| break; | |
| case 1: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[1]) << 8; | |
| value += (UINT32)cpu_memoryread(paddr[1] + 2) << 24; | |
| break; | |
| default: | |
| ia32_panic("cpu_linear_memory_read_d(): out of range (remain = %d)\n", remain); | |
| value = (UINT32)-1; | |
| break; | |
| } | |
| return value; | |
| } | |
| } | |
| UINT64 MEMCALL | |
| cpu_linear_memory_read_q(UINT32 laddr, const int ucrw) | |
| { | |
| UINT32 paddr[2]; | UINT32 paddr[2]; |
| UINT64 value; | |
| UINT remain; | UINT remain; |
| paddr[0] = paging(laddr, crw, user_mode); | paddr[0] = paging(laddr, ucrw); |
| remain = 0x1000 - (laddr & 0x00000fff); | remain = PAGE_SIZE - (laddr & PAGE_MASK); |
| if (remain >= length) { | if (remain >= 8) { |
| /* fast mode */ | return cpu_memoryread_q(paddr[0]); |
| switch (length) { | } else { |
| paddr[1] = paging(laddr + remain, ucrw); | |
| switch (remain) { | |
| case 7: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT64)cpu_memoryread_w(paddr[0] + 1) << 8; | |
| value += (UINT64)cpu_memoryread_d(paddr[0] + 3) << 24; | |
| value += (UINT64)cpu_memoryread(paddr[1]) << 56; | |
| break; | |
| case 6: | |
| value = cpu_memoryread_w(paddr[0]); | |
| value += (UINT64)cpu_memoryread_d(paddr[0] + 2) << 16; | |
| value += (UINT64)cpu_memoryread_w(paddr[1]) << 48; | |
| break; | |
| case 5: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT64)cpu_memoryread_d(paddr[0] + 1) << 8; | |
| value += (UINT64)cpu_memoryread_w(paddr[1]) << 40; | |
| value += (UINT64)cpu_memoryread(paddr[1] + 2) << 56; | |
| break; | |
| case 4: | case 4: |
| value = cpu_memoryread_d(paddr[0]); | value = cpu_memoryread_d(paddr[0]); |
| value += (UINT64)cpu_memoryread_d(paddr[1]) << 32; | |
| break; | |
| case 3: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT64)cpu_memoryread_w(paddr[0] + 1) << 8; | |
| value += (UINT64)cpu_memoryread_d(paddr[1]) << 24; | |
| value += (UINT64)cpu_memoryread(paddr[1] + 4) << 56; | |
| break; | break; |
| case 2: | case 2: |
| value = cpu_memoryread_w(paddr[0]); | value = cpu_memoryread_w(paddr[0]); |
| value += (UINT64)cpu_memoryread_d(paddr[1]) << 16; | |
| value += (UINT64)cpu_memoryread_w(paddr[1] + 4) << 48; | |
| break; | break; |
| case 1: | case 1: |
| value = cpu_memoryread(paddr[0]); | value = cpu_memoryread(paddr[0]); |
| value += (UINT64)cpu_memoryread_d(paddr[1]) << 8; | |
| value += (UINT64)cpu_memoryread_w(paddr[1] + 4) << 40; | |
| value += (UINT64)cpu_memoryread(paddr[1] + 6) << 56; | |
| break; | break; |
| default: | default: |
| ia32_panic("cpu_linear_memory_read(): invalid length (length = %d)\n", length); | ia32_panic("cpu_linear_memory_read_q(): out of range (remain = %d)\n", remain); |
| value = 0; /* compiler happy */ | value = (UINT64)-1; |
| break; | break; |
| } | } |
| } | |
| return value; | |
| } | |
| REG80 MEMCALL | |
| cpu_linear_memory_read_f(UINT32 laddr, const int ucrw) | |
| { | |
| UINT32 paddr[2]; | |
| REG80 value; | |
| UINT remain; | |
| UINT i, j; | |
| paddr[0] = paging(laddr, ucrw); | |
| remain = PAGE_SIZE - (laddr & PAGE_MASK); | |
| if (remain >= 10) { | |
| return cpu_memoryread_f(paddr[0]); | |
| } else { | |
| paddr[1] = paging(laddr + remain, ucrw); | |
| for (i = 0; i < remain; ++i) { | |
| value.b[i] = cpu_memoryread(paddr[0] + i); | |
| } | |
| for (j = 0; i < 10; ++i, ++j) { | |
| value.b[i] = cpu_memoryread(paddr[1] + j); | |
| } | |
| return value; | return value; |
| } | } |
| } | |
| void MEMCALL | |
| cpu_linear_memory_write_b(UINT32 laddr, UINT8 value, const int user_mode) | |
| { | |
| const int ucrw = CPU_PAGE_WRITE_DATA|user_mode; | |
| UINT32 paddr; | |
| /* slow mode */ | paddr = paging(laddr, ucrw); |
| paddr[1] = paging(laddr + remain, crw, user_mode); | cpu_memorywrite(paddr, value); |
| switch (remain) { | } |
| case 3: | |
| value = cpu_memoryread(paddr[0]); | |
| value += (UINT32)cpu_memoryread_w(paddr[0] + 1) << 8; | |
| value += (UINT32)cpu_memoryread(paddr[1]) << 24; | |
| break; | |
| case 2: | void MEMCALL |
| value = cpu_memoryread_w(paddr[0]); | cpu_linear_memory_write_w(UINT32 laddr, UINT16 value, const int user_mode) |
| value += (UINT32)cpu_memoryread_w(paddr[1]) << 16; | { |
| break; | const int ucrw = CPU_PAGE_WRITE_DATA|user_mode; |
| UINT32 paddr[2]; | |
| case 1: | paddr[0] = paging(laddr, ucrw); |
| value = cpu_memoryread(paddr[0]); | if ((laddr + 1) & PAGE_MASK) { |
| value += (UINT32)cpu_memoryread(paddr[1]) << 8; | cpu_memorywrite_w(paddr[0], value); |
| if (length == 4) { | } else { |
| value += (UINT32)cpu_memoryread_w(paddr[1] + 1) << 16; | paddr[1] = paging(laddr + 1, ucrw); |
| } | cpu_memorywrite(paddr[0], (UINT8)value); |
| break; | cpu_memorywrite(paddr[1], (UINT8)(value >> 8)); |
| default: | |
| ia32_panic("cpu_linear_memory_read(): out of range (remain = %d)\n", remain); | |
| value = 0; /* compiler happy */ | |
| break; | |
| } | } |
| return value; | |
| } | } |
| void MEMCALL | void MEMCALL |
| cpu_linear_memory_write(UINT32 laddr, UINT32 value, UINT length, const int user_mode) | cpu_linear_memory_write_d(UINT32 laddr, UINT32 value, const int user_mode) |
| { | { |
| const int crw = (CPU_PAGE_WRITE|CPU_PAGE_DATA); | const int ucrw = CPU_PAGE_WRITE_DATA|user_mode; |
| UINT32 paddr[2]; | UINT32 paddr[2]; |
| UINT remain; | UINT remain; |
| paddr[0] = paging(laddr, crw, user_mode); | paddr[0] = paging(laddr, ucrw); |
| remain = 0x1000 - (laddr & 0x00000fff); | remain = PAGE_SIZE - (laddr & PAGE_MASK); |
| if (remain >= length) { | if (remain >= 4) { |
| /* fast mode */ | cpu_memorywrite_d(paddr[0], value); |
| switch (length) { | } else { |
| case 4: | paddr[1] = paging(laddr + remain, ucrw); |
| cpu_memorywrite_d(paddr[0], value); | switch (remain) { |
| case 3: | |
| cpu_memorywrite(paddr[0], (UINT8)value); | |
| cpu_memorywrite_w(paddr[0] + 1, (UINT16)(value >> 8)); | |
| cpu_memorywrite(paddr[1], (UINT8)(value >> 24)); | |
| break; | break; |
| case 2: | case 2: |
| cpu_memorywrite_w(paddr[0], (UINT16)value); | cpu_memorywrite_w(paddr[0], (UINT16)value); |
| cpu_memorywrite_w(paddr[1], (UINT16)(value >> 16)); | |
| break; | break; |
| case 1: | case 1: |
| cpu_memorywrite(paddr[0], (UINT8)value); | cpu_memorywrite(paddr[0], (UINT8)value); |
| cpu_memorywrite_w(paddr[1], (UINT16)(value >> 8)); | |
| cpu_memorywrite(paddr[1] + 2, (UINT8)(value >> 24)); | |
| break; | break; |
| } | |
| } | |
| } | |
| default: | void MEMCALL |
| ia32_panic("cpu_linear_memory_write(): invalid length (length = %d)\n", length); | cpu_linear_memory_write_q(UINT32 laddr, UINT64 value, const int user_mode) |
| { | |
| const int ucrw = CPU_PAGE_WRITE_DATA|user_mode; | |
| UINT32 paddr[2]; | |
| UINT remain; | |
| paddr[0] = paging(laddr, ucrw); | |
| remain = PAGE_SIZE - (laddr & PAGE_MASK); | |
| if (remain >= 8) { | |
| cpu_memorywrite_q(paddr[0], value); | |
| } else { | |
| paddr[1] = paging(laddr + remain, ucrw); | |
| switch (remain) { | |
| case 7: | |
| cpu_memorywrite(paddr[0], (UINT8)value); | |
| cpu_memorywrite_w(paddr[0] + 1, (UINT16)(value >> 8)); | |
| cpu_memorywrite_d(paddr[0] + 3, (UINT32)(value >> 24)); | |
| cpu_memorywrite(paddr[1], (UINT8)(value >> 56)); | |
| break; | |
| case 6: | |
| cpu_memorywrite_w(paddr[0], (UINT16)value); | |
| cpu_memorywrite_d(paddr[0] + 2, (UINT32)(value >> 16)); | |
| cpu_memorywrite_w(paddr[1], (UINT16)(value >> 48)); | |
| break; | |
| case 5: | |
| cpu_memorywrite(paddr[0], (UINT8)value); | |
| cpu_memorywrite_d(paddr[0] + 1, (UINT32)(value >> 8)); | |
| cpu_memorywrite_w(paddr[1], (UINT16)(value >> 40)); | |
| cpu_memorywrite(paddr[1] + 2, (UINT8)(value >> 56)); | |
| break; | |
| case 4: | |
| cpu_memorywrite_d(paddr[0], (UINT32)value); | |
| cpu_memorywrite_d(paddr[1], (UINT32)(value >> 32)); | |
| break; | |
| case 3: | |
| cpu_memorywrite(paddr[0], (UINT8)value); | |
| cpu_memorywrite_w(paddr[0] + 1, (UINT16)(value >> 8)); | |
| cpu_memorywrite_d(paddr[1], (UINT32)(value >> 24)); | |
| cpu_memorywrite(paddr[1] + 4, (UINT8)(value >> 56)); | |
| break; | |
| case 2: | |
| cpu_memorywrite_w(paddr[0], (UINT16)value); | |
| cpu_memorywrite_d(paddr[1], (UINT32)(value >> 16)); | |
| cpu_memorywrite_w(paddr[1] + 4, (UINT16)(value >> 48)); | |
| break; | |
| case 1: | |
| cpu_memorywrite(paddr[0], (UINT8)value); | |
| cpu_memorywrite_d(paddr[1], (UINT32)(value >> 8)); | |
| cpu_memorywrite_w(paddr[1] + 4, (UINT16)(value >> 40)); | |
| cpu_memorywrite(paddr[1] + 6, (UINT8)(value >> 56)); | |
| break; | break; |
| } | } |
| return; | |
| } | } |
| } | |
| /* slow mode */ | void MEMCALL |
| paddr[1] = paging(laddr + remain, crw, user_mode); | cpu_linear_memory_write_f(UINT32 laddr, const REG80 *value, const int user_mode) |
| switch (remain) { | { |
| case 3: | const int ucrw = CPU_PAGE_WRITE_DATA|user_mode; |
| cpu_memorywrite(paddr[0], (UINT8)value); | UINT32 paddr[2]; |
| cpu_memorywrite_w(paddr[0] + 1, (UINT16)(value >> 8)); | UINT remain; |
| cpu_memorywrite(paddr[1], (BYTE)(value >> 24)); | UINT i, j; |
| break; | |
| case 2: | |
| cpu_memorywrite_w(paddr[0], (UINT16)value); | |
| cpu_memorywrite_w(paddr[1], (UINT16)(value >> 16)); | |
| break; | |
| case 1: | paddr[0] = paging(laddr, ucrw); |
| cpu_memorywrite(paddr[0], (UINT8)value); | remain = PAGE_SIZE - (laddr & PAGE_MASK); |
| cpu_memorywrite(paddr[1], (UINT8)(value >> 8)); | if (remain >= 10) { |
| if (length == 4) { | cpu_memorywrite_f(paddr[0], value); |
| cpu_memorywrite_w(paddr[1] + 1, (UINT16)(value >> 16)); | } else { |
| paddr[1] = paging(laddr + remain, ucrw); | |
| for (i = 0; i < remain; ++i) { | |
| cpu_memorywrite(paddr[0] + i, value->b[i]); | |
| } | |
| for (j = 0; i < 10; ++i, ++j) { | |
| cpu_memorywrite(paddr[1] + j, value->b[i]); | |
| } | } |
| break; | |
| default: | |
| ia32_panic("cpu_linear_memory_write(): out of range (remain = %d)\n", remain); | |
| break; | |
| } | } |
| } | } |
| void MEMCALL | void MEMCALL |
| paging_check(UINT32 laddr, UINT length, const int crw, const int user_mode) | cpu_memory_access_la_region(UINT32 laddr, UINT length, const int ucrw, UINT8 *data) |
| { | { |
| UINT32 paddr; | UINT32 paddr; |
| UINT remain; /* page remain */ | UINT remain; /* page remain */ |
| UINT r; | UINT r; |
| remain = 0x1000 - (laddr & 0x00000fff); | if (length == 0) |
| return; | |
| remain = PAGE_SIZE - (laddr & PAGE_MASK); | |
| for (;;) { | for (;;) { |
| paddr = paging(laddr, crw, user_mode); | if (!CPU_STAT_PAGING) { |
| paddr = laddr; | |
| } else { | |
| paddr = paging(laddr, ucrw); | |
| } | |
| r = (remain > length) ? length : remain; | r = (remain > length) ? length : remain; |
| if (!(ucrw & CPU_PAGE_WRITE)) { | |
| cpu_memoryread_region(paddr, data, r); | |
| } else { | |
| cpu_memorywrite_region(paddr, data, r); | |
| } | |
| length -= r; | length -= r; |
| if (length == 0) | if (length == 0) |
| break; | break; |
| data += r; | |
| laddr += r; | laddr += r; |
| remain -= r; | remain -= r; |
| if (remain <= 0) { | if (remain <= 0) { |
| /* next page */ | /* next page */ |
| remain += 0x1000; | remain += PAGE_SIZE; |
| } | } |
| } | } |
| } | } |
| static UINT32 | UINT32 MEMCALL |
| paging(const UINT32 laddr, const int crw, const int user_mode) | laddr2paddr(const UINT32 laddr, const int ucrw) |
| { | |
| return paging(laddr, ucrw); | |
| } | |
| static UINT32 MEMCALL | |
| paging(const UINT32 laddr, const int ucrw) | |
| { | { |
| UINT32 paddr; /* physical address */ | UINT32 paddr; /* physical address */ |
| UINT32 pde_addr; /* page directory entry address */ | UINT32 pde_addr; /* page directory entry address */ |
| Line 469 paging(const UINT32 laddr, const int crw | Line 639 paging(const UINT32 laddr, const int crw |
| UINT32 pte; /* page table entry */ | UINT32 pte; /* page table entry */ |
| UINT bit; | UINT bit; |
| UINT err; | UINT err; |
| #if defined(IA32_SUPPORT_TLB) | #if defined(IA32_SUPPORT_TLB) |
| if (tlb_lookup(laddr, crw, &paddr)) | TLB_ENTRY_T *ep; |
| return paddr; | |
| #endif /* IA32_SUPPORT_TLB */ | ep = tlb_lookup(laddr, ucrw); |
| if (ep != NULL) | |
| return ep->paddr + (laddr & PAGE_MASK); | |
| #endif | |
| pde_addr = CPU_STAT_PDE_BASE + ((laddr >> 20) & 0xffc); | pde_addr = CPU_STAT_PDE_BASE + ((laddr >> 20) & 0xffc); |
| pde = cpu_memoryread_d(pde_addr); | pde = cpu_memoryread_d(pde_addr); |
| Line 504 paging(const UINT32 laddr, const int crw | Line 676 paging(const UINT32 laddr, const int crw |
| } | } |
| /* make physical address */ | /* make physical address */ |
| paddr = (pte & CPU_PTE_BASEADDR_MASK) + (laddr & 0x00000fff); | paddr = (pte & CPU_PTE_BASEADDR_MASK) + (laddr & PAGE_MASK); |
| bit = crw & CPU_PAGE_WRITE; | bit = ucrw & (CPU_PAGE_WRITE|CPU_PAGE_USER_MODE); |
| bit |= (pde & pte & (CPU_PTE_WRITABLE|CPU_PTE_USER_MODE)); | bit |= (pde & pte & (CPU_PTE_WRITABLE|CPU_PTE_USER_MODE)); |
| bit |= (user_mode << 3); | |
| bit |= CPU_STAT_WP; | bit |= CPU_STAT_WP; |
| #if !defined(USE_PAGE_ACCESS_TABLE) | #if !defined(USE_PAGE_ACCESS_TABLE) |
| Line 525 paging(const UINT32 laddr, const int crw | Line 696 paging(const UINT32 laddr, const int crw |
| goto pf_exception; | goto pf_exception; |
| } | } |
| if ((crw & CPU_PAGE_WRITE) && !(pte & CPU_PTE_DIRTY)) { | if ((ucrw & CPU_PAGE_WRITE) && !(pte & CPU_PTE_DIRTY)) { |
| pte |= CPU_PTE_DIRTY; | pte |= CPU_PTE_DIRTY; |
| cpu_memorywrite_d(pte_addr, pte); | cpu_memorywrite_d(pte_addr, pte); |
| } | } |
| #if defined(IA32_SUPPORT_TLB) | #if defined(IA32_SUPPORT_TLB) |
| tlb_update(laddr, pte, crw); | tlb_update(laddr, pte, (bit & (CPU_PTE_WRITABLE|CPU_PTE_USER_MODE)) + ((ucrw & CPU_PAGE_CODE) >> 1)); |
| #endif /* IA32_SUPPORT_TLB */ | #endif |
| return paddr; | return paddr; |
| pf_exception: | pf_exception: |
| CPU_CR2 = laddr; | CPU_CR2 = laddr; |
| err |= ((crw & CPU_PAGE_WRITE) << 1) | (user_mode << 2); | err |= (ucrw & CPU_PAGE_WRITE) << 1; |
| err |= (ucrw & CPU_PAGE_USER_MODE) >> 1; | |
| EXCEPTION(PF_EXCEPTION, err); | EXCEPTION(PF_EXCEPTION, err); |
| return 0; /* compiler happy */ | return 0; /* compiler happy */ |
| } | } |
| #if defined(IA32_SUPPORT_TLB) | #if defined(IA32_SUPPORT_TLB) |
| /* | /* |
| * TLB | * TLB |
| */ | */ |
| #define TLB_GET_PADDR(ep, addr) ((ep)->paddr + ((addr) & ~CPU_PTE_BASEADDR_MASK)) | |
| #define TLB_SET_PADDR(ep, addr) ((ep)->paddr = (addr) & CPU_PTE_BASEADDR_MASK) | |
| #define TLB_TAG_SHIFT TLB_ENTRY_TAG_MAX_SHIFT | |
| #define TLB_TAG_MASK (~((1 << TLB_TAG_SHIFT) - 1)) | |
| #define TLB_GET_TAG_ADDR(ep) ((ep)->tag & TLB_TAG_MASK) | |
| #define TLB_SET_TAG_ADDR(ep, addr) \ | |
| do { \ | |
| (ep)->tag &= ~TLB_TAG_MASK; \ | |
| (ep)->tag |= (addr) & TLB_TAG_MASK; \ | |
| } while (/*CONSTCOND*/ 0) |
| #define TLB_IS_VALID(ep) ((ep)->tag & TLB_ENTRY_TAG_VALID) | |
| #define TLB_SET_VALID(ep) ((ep)->tag = TLB_ENTRY_TAG_VALID) | |
| #define TLB_SET_INVALID(ep) ((ep)->tag = 0) | |
| #define TLB_IS_WRITABLE(ep) ((ep)->tag & CPU_PTE_WRITABLE) | |
| #define TLB_IS_USERMODE(ep) ((ep)->tag & CPU_PTE_USER_MODE) | |
| #define TLB_IS_DIRTY(ep) ((ep)->tag & TLB_ENTRY_TAG_DIRTY) | |
| #if (CPU_FEATURES & CPU_FEATURE_PGE) == CPU_FEATURE_PGE | |
| #define TLB_IS_GLOBAL(ep) ((ep)->tag & TLB_ENTRY_TAG_GLOBAL) | |
| #else | |
| #define TLB_IS_GLOBAL(ep) 0 | |
| #endif | |
| #define TLB_SET_TAG_FLAGS(ep, entry, bit) \ | |
| do { \ | |
| (ep)->tag |= (entry) & (CPU_PTE_GLOBAL_PAGE|CPU_PTE_DIRTY); \ | |
| (ep)->tag |= (bit) & (CPU_PTE_WRITABLE|CPU_PTE_USER_MODE); \ | |
| } while (/*CONSTCOND*/ 0) | |
| #define NTLB 2 /* 0: DTLB, 1: ITLB */ | |
| #define NENTRY (1 << 6) | |
| #define TLB_ENTRY_SHIFT 12 | |
| #define TLB_ENTRY_MASK (NENTRY - 1) | |
| typedef struct { | |
| TLB_ENTRY_T entry[NENTRY]; | |
| } TLB_T; | |
| static TLB_T tlb[NTLB]; | |
| #if defined(IA32_PROFILE_TLB) | #if defined(IA32_PROFILE_TLB) |
| /* profiling */ | /* profiling */ |
| Line 568 static TLB_PROFILE_T tlb_profile; | Line 781 static TLB_PROFILE_T tlb_profile; |
| #endif /* IA32_PROFILE_TLB */ | #endif /* IA32_PROFILE_TLB */ |
| typedef struct { | |
| UINT32 tag; /* linear address */ | |
| #define TLB_ENTRY_VALID (1 << 0) | |
| #define TLB_ENTRY_GLOBAL CPU_PTE_GLOBAL_PAGE | |
| UINT32 paddr; /* physical address */ | |
| } TLB_ENTRY_T; | |
| #define TLB_GET_PADDR(ep, addr) ((ep)->paddr + ((addr) & ~CPU_PTE_BASEADDR_MASK)) | |
| #define TLB_SET_PADDR(ep, addr) ((ep)->paddr = (addr) & CPU_PTE_BASEADDR_MASK) | |
| #define TLB_TAG_SHIFT 17 | |
| #define TLB_TAG_MASK ~((1 << TLB_TAG_SHIFT) - 1) | |
| #define TLB_GET_TAG_ADDR(ep) ((ep)->tag & TLB_TAG_MASK) | |
| #define TLB_SET_TAG_ADDR(ep, addr) \ | |
| ((ep)->tag = ((addr) & TLB_TAG_MASK) + ((ep)->tag & ~TLB_TAG_MASK)) | |
| #define TLB_IS_VALID(ep) ((ep)->tag & TLB_ENTRY_VALID) | |
| #define TLB_SET_VALID(ep) ((ep)->tag |= TLB_ENTRY_VALID) | |
| #define TLB_CLEAR_VALID(ep) ((ep)->tag &= ~TLB_ENTRY_VALID) | |
| #if CPU_FAMILY == 4 | |
| #define TLB_IS_GLOBAL(ep) FALSE | |
| #define TLB_SET_GLOBAL(ep) (void)(ep) | |
| #define TLB_CLEAR_GLOBAL(ep) (void)(ep) | |
| #else | |
| #define TLB_IS_GLOBAL(ep) ((ep)->tag & TLB_ENTRY_GLOBAL) | |
| #define TLB_SET_GLOBAL(ep) ((ep)->tag |= TLB_ENTRY_GLOBAL) | |
| #define TLB_CLEAR_GLOBAL(ep) ((ep)->tag &= ~TLB_ENTRY_GLOBAL) | |
| #endif | |
| #if CPU_FAMILY == 4 | |
| #define NTLB 1 | |
| #define NENTRY (1 << 3) | |
| #define NWAY (1 << 2) | |
| #define TLB_ENTRY_SHIFT 12 | |
| #define TLB_ENTRY_MASK (NENTRY - 1) | |
| #define TLB_WAY_SHIFT 15 | |
| #define TLB_WAY_MASK (NWAY - 1) | |
| #endif | |
| typedef struct { | |
| TLB_ENTRY_T entry[NENTRY][NWAY]; | |
| } TLB_T; | |
| static TLB_T tlb; | |
| void | void |
| tlb_init(void) | tlb_init(void) |
| { | { |
| memset(&tlb, 0, sizeof(tlb)); | memset(tlb, 0, sizeof(tlb)); |
| #if defined(IA32_PROFILE_TLB) | #if defined(IA32_PROFILE_TLB) |
| memset(&tlb_profile, 0, sizeof(tlb_profile)); | memset(tlb_profile, 0, sizeof(tlb_profile)); |
| #endif /* IA32_PROFILE_TLB */ | #endif /* IA32_PROFILE_TLB */ |
| } | } |
| void | void MEMCALL |
| tlb_flush(BOOL allflush) | tlb_flush(BOOL allflush) |
| { | { |
| TLB_ENTRY_T *ep; | TLB_ENTRY_T *ep; |
| int i, j; | int i; |
| int n; | |
| if (allflush) { | if (allflush) { |
| PROFILE_INC(tlb_global_flushes); | PROFILE_INC(tlb_global_flushes); |
| Line 640 tlb_flush(BOOL allflush) | Line 804 tlb_flush(BOOL allflush) |
| PROFILE_INC(tlb_flushes); | PROFILE_INC(tlb_flushes); |
| } | } |
| for (i = 0; i < NENTRY ; i++) { | for (n = 0; n < NTLB; n++) { |
| for (j = 0; j < NWAY; j++) { | for (i = 0; i < NENTRY ; i++) { |
| ep = &tlb.entry[i][j]; | ep = &tlb[n].entry[i]; |
| if (TLB_IS_VALID(ep) && (!TLB_IS_GLOBAL(ep) || allflush)) { | if (TLB_IS_VALID(ep) && (allflush || !TLB_IS_GLOBAL(ep))) { |
| TLB_CLEAR_VALID(ep); | TLB_SET_INVALID(ep); |
| PROFILE_INC(tlb_entry_flushes); | PROFILE_INC(tlb_entry_flushes); |
| } | } |
| } | } |
| } | } |
| } | } |
| void | void MEMCALL |
| tlb_flush_page(UINT32 laddr) | tlb_flush_page(UINT32 laddr) |
| { | { |
| TLB_ENTRY_T *ep; | TLB_ENTRY_T *ep; |
| int idx; | int idx; |
| int way; | int n; |
| PROFILE_INC(tlb_flushes); | PROFILE_INC(tlb_flushes); |
| idx = (laddr >> TLB_ENTRY_SHIFT) & (NENTRY - 1); | idx = (laddr >> TLB_ENTRY_SHIFT) & TLB_ENTRY_MASK; |
| way = (laddr >> TLB_WAY_SHIFT) & (NWAY - 1); | |
| ep = &tlb.entry[idx][way]; | |
| if (TLB_IS_VALID(ep)) { | for (n = 0; n < NTLB; n++) { |
| if ((laddr & TLB_TAG_MASK) == TLB_GET_TAG_ADDR(ep)) { | ep = &tlb[n].entry[idx]; |
| TLB_CLEAR_VALID(ep); | if (TLB_IS_VALID(ep)) { |
| return; | if ((laddr & TLB_TAG_MASK) == TLB_GET_TAG_ADDR(ep)) { |
| TLB_SET_INVALID(ep); | |
| PROFILE_INC(tlb_entry_flushes); | |
| } | |
| } | } |
| } | } |
| } | } |
| static BOOL | TLB_ENTRY_T * MEMCALL |
| tlb_lookup(const UINT32 laddr, const int crw, UINT32 *paddr) | tlb_lookup(const UINT32 laddr, const int ucrw) |
| { | { |
| TLB_ENTRY_T *ep; | TLB_ENTRY_T *ep; |
| UINT bit; | |
| int idx; | int idx; |
| int way; | int n; |
| PROFILE_INC(tlb_lookups); | PROFILE_INC(tlb_lookups); |
| idx = (laddr >> TLB_ENTRY_SHIFT) & (NENTRY - 1); | n = (ucrw & CPU_PAGE_CODE) >> 1; |
| way = (laddr >> TLB_WAY_SHIFT) & (NWAY - 1); | idx = (laddr >> TLB_ENTRY_SHIFT) & TLB_ENTRY_MASK; |
| ep = &tlb.entry[idx][way]; | ep = &tlb[n].entry[idx]; |
| if (TLB_IS_VALID(ep)) { | if (TLB_IS_VALID(ep)) { |
| if ((laddr & TLB_TAG_MASK) == TLB_GET_TAG_ADDR(ep)) { | if ((laddr & TLB_TAG_MASK) == TLB_GET_TAG_ADDR(ep)) { |
| *paddr = TLB_GET_PADDR(ep, laddr); | bit = ucrw & (CPU_PAGE_WRITE|CPU_PAGE_USER_MODE); |
| PROFILE_INC(tlb_hits); | bit |= ep->tag & (CPU_PTE_WRITABLE|CPU_PTE_USER_MODE); |
| return TRUE; | bit |= CPU_STAT_WP; |
| #if !defined(USE_PAGE_ACCESS_TABLE) | |
| if ((page_access & (1 << bit))) | |
| #else | |
| if (page_access_bit[bit]) | |
| #endif | |
| { | |
| if (!(ucrw & CPU_PAGE_WRITE) || TLB_IS_DIRTY(ep)) { | |
| PROFILE_INC(tlb_hits); | |
| return ep; | |
| } | |
| } | |
| } | } |
| } | } |
| (void)crw; | |
| PROFILE_INC(tlb_misses); | PROFILE_INC(tlb_misses); |
| return FALSE; | return NULL; |
| } | } |
| static void | static void MEMCALL |
| tlb_update(const UINT32 laddr, const UINT entry, const int crw) | tlb_update(const UINT32 laddr, const UINT entry, const int bit) |
| { | { |
| TLB_ENTRY_T *ep; | TLB_ENTRY_T *ep; |
| UINT32 pos; | |
| int idx; | int idx; |
| int way; | int n; |
| PROFILE_INC(tlb_updates); | PROFILE_INC(tlb_updates); |
| idx = (laddr >> TLB_ENTRY_SHIFT) & (NENTRY - 1); | n = bit & 1; |
| way = (laddr >> TLB_WAY_SHIFT) & (NWAY - 1); | idx = (laddr >> TLB_ENTRY_SHIFT) & TLB_ENTRY_MASK; |
| ep = &tlb.entry[idx][way]; | ep = &tlb[n].entry[idx]; |
| TLB_SET_VALID(ep); | TLB_SET_VALID(ep); |
| #if CPU_FAMILY >= 5 | |
| if (entry & CPU_PTE_GLOBAL_PAGE) { | |
| TLB_SET_GLOBAL(ep); | |
| } | |
| #endif | |
| TLB_SET_TAG_ADDR(ep, laddr); | TLB_SET_TAG_ADDR(ep, laddr); |
| TLB_SET_PADDR(ep, entry); | TLB_SET_PADDR(ep, entry); |
| (void)crw; | TLB_SET_TAG_FLAGS(ep, entry, bit); |
| if (ep->paddr < CPU_MEMREADMAX) { | |
| ep->memp = mem + ep->paddr; | |
| return; | |
| } else if (ep->paddr >= USE_HIMEM) { | |
| pos = (ep->paddr & CPU_ADRSMASK) - 0x100000; | |
| if (pos < CPU_EXTMEMSIZE) { | |
| ep->memp = CPU_EXTMEM + pos; | |
| return; | |
| } | |
| } | |
| ep->memp = NULL; | |
| } | } |
| #endif /* IA32_SUPPORT_TLB */ | #endif /* IA32_SUPPORT_TLB */ |
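The recurring pattern in the 1.31 column is that every width-specific helper (for example `cpu_linear_memory_read_d()`) translates the first page, computes how many bytes remain before the 4 KiB boundary, and translates the next page separately only when the access crosses it. The following standalone sketch illustrates that split, under stated assumptions: the flat `mem[]` array, the identity `translate()`, and `phys_read8()` are hypothetical stand-ins for the emulator's `paging()` and `cpu_memoryread()` routines, not the actual implementation.

```c
/*
 * Minimal sketch of the page-boundary splitting pattern used by the
 * revision 1.31 helpers.  translate() and phys_read8() are hypothetical
 * stand-ins for paging() and cpu_memoryread(); the identity mapping and
 * the tiny flat memory exist only so the example runs on its own.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 0x1000u
#define PAGE_MASK (PAGE_SIZE - 1u)

static uint8_t mem[2 * PAGE_SIZE];          /* toy physical memory */

static uint32_t translate(uint32_t laddr)   /* stand-in for paging() */
{
    return laddr;                           /* identity mapping for the demo */
}

static uint8_t phys_read8(uint32_t paddr)   /* stand-in for cpu_memoryread() */
{
    return mem[paddr];
}

/* Read a little-endian 32-bit value at a linear address, splitting the
 * access when it crosses a page boundary: the remain/paddr[0]/paddr[1]
 * pattern from the new code. */
static uint32_t linear_read32(uint32_t laddr)
{
    uint32_t paddr0 = translate(laddr);
    uint32_t remain = PAGE_SIZE - (laddr & PAGE_MASK);  /* bytes left in page */
    uint32_t value = 0;
    uint32_t i;

    if (remain >= 4) {
        /* Fast path: the whole dword lies inside one page. */
        for (i = 0; i < 4; i++)
            value |= (uint32_t)phys_read8(paddr0 + i) << (8 * i);
    } else {
        /* Slow path: the next page may map anywhere, so translate it too. */
        uint32_t paddr1 = translate(laddr + remain);
        for (i = 0; i < 4; i++) {
            uint32_t p = (i < remain) ? paddr0 + i : paddr1 + (i - remain);
            value |= (uint32_t)phys_read8(p) << (8 * i);
        }
    }
    return value;
}

int main(void)
{
    uint32_t i;

    for (i = 0; i < sizeof(mem); i++)
        mem[i] = (uint8_t)i;
    /* 0x0ffe is 2 bytes before the page boundary, so the dword is
     * assembled from both pages; prints 0x0100fffe. */
    printf("0x%08x\n", linear_read32(0x0ffe));
    return 0;
}
```

The same split is applied on the write side (`cpu_linear_memory_write_*`) and in the read-modify-write helpers, which keep both translated physical addresses so the callback's result can be written back across the boundary.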