--- np2/i386c/ia32/cpu_mem.c	2004/02/20 16:09:04	1.11
+++ np2/i386c/ia32/cpu_mem.c	2004/03/25 15:08:32	1.17
@@ -1,7 +1,7 @@
-/*	$Id: cpu_mem.c,v 1.11 2004/02/20 16:09:04 monaka Exp $	*/
+/*	$Id: cpu_mem.c,v 1.17 2004/03/25 15:08:32 monaka Exp $	*/
 
 /*
- * Copyright (c) 2002-2003 NONAKA Kimihiro
+ * Copyright (c) 2002-2004 NONAKA Kimihiro
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -154,19 +154,22 @@ cpu_memorywrite_check(descriptor_t *sd, 
 	sd->flag |= CPU_DESC_FLAG_WRITABLE;
 }
 
-BOOL
-cpu_stack_push_check(descriptor_t *sd, UINT32 esp, UINT length)
+void
+cpu_stack_push_check(UINT16 s, descriptor_t *sd, UINT32 esp, UINT length)
 {
 	UINT32 limit;
 
 	if (CPU_STAT_PM) {
-		if (!sd->valid || !sd->p)
-			return FALSE;
-		if (!sd->s || sd->u.seg.c || !sd->u.seg.wr)
-			return FALSE;
+		if (!sd->valid || !sd->p) {
+			VERBOSE(("cpu_stack_push_check: valid = %d, present = %d", sd->valid, sd->p));
+			EXCEPTION(SS_EXCEPTION, s & 0xfffc);
+		}
+		if (!sd->s || sd->u.seg.c || !sd->u.seg.wr) {
+			VERBOSE(("cpu_stack_push_check: s = %d, c = %d, wr = %d", sd->s, sd->u.seg.c, sd->u.seg.wr));
+			EXCEPTION(SS_EXCEPTION, s & 0xfffc);
+		}
 		if (!sd->d) {
-			esp &= 0xffff;
 			limit = 0xffff;
 		} else {
 			limit = 0xffffffff;
@@ -176,37 +179,51 @@ cpu_stack_push_check(descriptor_t *sd, U
 			if ((esp == 0)
 			    || (esp < length)
 			    || (esp - length <= sd->u.seg.limit)
-			    || (esp > limit))
-				return FALSE;
+			    || (esp > limit)) {
+				VERBOSE(("cpu_stack_push_check: expand-down, esp = %08x, length = %08x", esp, length));
+				VERBOSE(("cpu_stack_push_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit));
+				VERBOSE(("cpu_stack_push_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend));
+				EXCEPTION(SS_EXCEPTION, s & 0xfffc);
+			}
 		} else {
 			/* expand-up stack */
 			if (esp == 0) {
 				if ((sd->d && (sd->u.seg.segend != 0xffffffff))
-				    || (!sd->d && (sd->u.seg.segend != 0xffff)))
-					return FALSE;
+				    || (!sd->d && (sd->u.seg.segend != 0xffff))) {
+					VERBOSE(("cpu_stack_push_check: expand-up, esp = %08x, length = %08x", esp, length));
+					VERBOSE(("cpu_stack_push_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit));
+					VERBOSE(("cpu_stack_push_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend));
+					EXCEPTION(SS_EXCEPTION, s & 0xfffc);
+				}
 			} else {
 				if ((esp < length)
-				    || (esp - 1 > sd->u.seg.limit))
-					return FALSE;
+				    || (esp - 1 > sd->u.seg.limit)) {
+					VERBOSE(("cpu_stack_push_check: expand-up, esp = %08x, length = %08x", esp, length));
+					VERBOSE(("cpu_stack_push_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit));
+					VERBOSE(("cpu_stack_push_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend));
+					EXCEPTION(SS_EXCEPTION, s & 0xfffc);
+				}
			}
		}
	}
-	return TRUE;
 }
 
-BOOL
-cpu_stack_pop_check(descriptor_t *sd, UINT32 esp, UINT length)
+void
+cpu_stack_pop_check(UINT16 s, descriptor_t *sd, UINT32 esp, UINT length)
 {
 	UINT32 limit;
 
 	if (CPU_STAT_PM) {
-		if (!sd->valid || !sd->p)
-			return FALSE;
-		if (!sd->s || sd->u.seg.c || !sd->u.seg.wr)
-			return FALSE;
+		if (!sd->valid || !sd->p) {
+			VERBOSE(("cpu_stack_pop_check: valid = %d, present = %d", sd->valid, sd->p));
+			EXCEPTION(SS_EXCEPTION, s & 0xfffc);
+		}
+		if (!sd->s || sd->u.seg.c || !sd->u.seg.wr) {
+			VERBOSE(("cpu_stack_pop_check: s = %d, c = %d, wr = %d", sd->s, sd->u.seg.c, sd->u.seg.wr));
+			EXCEPTION(SS_EXCEPTION, s & 0xfffc);
+		}
 		if (!sd->d) {
-			esp &= 0xffff;
 			limit = 0xffff;
 		} else {
 			limit = 0xffffffff;
@@ -214,394 +231,240 @@ cpu_stack_pop_check(descriptor_t *sd, UI
 		if (sd->u.seg.ec) {
 			/* expand-down stack */
 			if ((esp == limit)
-			    || ((limit - esp) + 1 < length))
-				return FALSE;
+			    || ((limit - esp) + 1 < length)) {
+				VERBOSE(("cpu_stack_pop_check: expand-down, esp = %08x, length = %08x", esp, length));
+				VERBOSE(("cpu_stack_pop_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit));
+				VERBOSE(("cpu_stack_pop_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend));
+				EXCEPTION(SS_EXCEPTION, s & 0xfffc);
+			}
 		} else {
 			/* expand-up stack */
 			if ((esp == limit)
 			    || (sd->u.seg.segend == 0)
 			    || (esp > sd->u.seg.limit)
-			    || ((sd->u.seg.limit - esp) + 1 < length))
-				return FALSE;
+			    || ((sd->u.seg.limit - esp) + 1 < length)) {
+				VERBOSE(("cpu_stack_pop_check: expand-up, esp = %08x, length = %08x", esp, length));
+				VERBOSE(("cpu_stack_pop_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit));
+				VERBOSE(("cpu_stack_pop_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend));
+				EXCEPTION(SS_EXCEPTION, s & 0xfffc);
+			}
		}
	}
-	return TRUE;
 }
 
+#if defined(IA32_SUPPORT_PREFETCH_QUEUE)
 /*
- * code fetch
+ * code prefetch
  */
-UINT8 MEMCALL
-cpu_codefetch(UINT32 offset)
+#define	CPU_PREFETCHQ_MASK	(CPU_PREFETCH_QUEUE_LENGTH - 1)
+
+INLINE static MEMCALL void
+cpu_prefetch(UINT32 address)
 {
-	descriptor_t *sd;
-	UINT32 addr;
+	UINT offset = address & CPU_PREFETCHQ_MASK;
+	UINT length = CPU_PREFETCH_QUEUE_LENGTH - offset;
 
-	sd = &CPU_STAT_SREG(CPU_CS_INDEX);
-	if (offset <= sd->u.seg.limit) {
-		addr = CPU_STAT_SREGBASE(CPU_CS_INDEX) + offset;
-		if (!CPU_STAT_PM)
-			return cpu_memoryread(addr);
-		return cpu_lcmemoryread(addr);
-	}
-	EXCEPTION(GP_EXCEPTION, 0);
-	return 0;	/* compiler happy */
+	cpu_memory_access_la_region(address, length, CPU_PAGE_READ_CODE|CPU_STAT_USER_MODE, CPU_PREFETCHQ + offset);
+	CPU_PREFETCHQ_REMAIN = (SINT8)length;
 }
 
-UINT16 MEMCALL
-cpu_codefetch_w(UINT32 offset)
+INLINE static MEMCALL UINT8
+cpu_prefetchq(UINT32 address)
 {
-	descriptor_t *sd;
-	UINT32 addr;
+	UINT8 v;
 
-	sd = &CPU_STAT_SREG(CPU_CS_INDEX);
-	if (offset <= sd->u.seg.limit - 1) {
-		addr = CPU_STAT_SREGBASE(CPU_CS_INDEX) + offset;
-		if (!CPU_STAT_PM)
-			return cpu_memoryread_w(addr);
-		return cpu_lcmemoryread_w(addr);
-	}
-	EXCEPTION(GP_EXCEPTION, 0);
-	return 0;	/* compiler happy */
+	CPU_PREFETCHQ_REMAIN--;
+	v = CPU_PREFETCHQ[address & CPU_PREFETCHQ_MASK];
+	return v;
 }
 
-UINT32 MEMCALL
-cpu_codefetch_d(UINT32 offset)
+INLINE static MEMCALL UINT16
+cpu_prefetchq_w(UINT32 address)
 {
-	descriptor_t *sd;
-	UINT32 addr;
+	BYTE *p;
+	UINT16 v;
 
-	sd = &CPU_STAT_SREG(CPU_CS_INDEX);
-	if (offset <= sd->u.seg.limit - 3) {
-		addr = CPU_STAT_SREGBASE(CPU_CS_INDEX) + offset;
-		if (!CPU_STAT_PM)
-			return cpu_memoryread_d(addr);
-		return cpu_lcmemoryread_d(addr);
-	}
-	EXCEPTION(GP_EXCEPTION, 0);
-	return 0;	/* compiler happy */
+	CPU_PREFETCHQ_REMAIN -= 2;
+	p = CPU_PREFETCHQ + (address & CPU_PREFETCHQ_MASK);
+	v = LOADINTELWORD(p);
+	return v;
 }
-
-/*
- * virtual address -> linear address
- */
-UINT8 MEMCALL
-cpu_vmemoryread(int idx, UINT32 offset)
+INLINE static MEMCALL UINT32
+cpu_prefetchq_3(UINT32 address)
 {
-	descriptor_t *sd;
-	UINT32 addr;
-	int exc;
-
-	__ASSERT((unsigned int)idx < CPU_SEGREG_NUM);
-
-	sd = &CPU_STAT_SREG(idx);
-	if (!sd->valid) {
-		exc = GP_EXCEPTION;
-		goto err;
-	}
-
-	if (!(sd->flag & CPU_DESC_FLAG_READABLE)) {
-		cpu_memoryread_check(sd, offset, 1,
-		    (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION);
-	} else {
-		switch (sd->type) {
-		case 4: case 5: case 6: case 7:
-			if (offset <= sd->u.seg.limit) {
-				if (idx == CPU_SS_INDEX)
-					exc = SS_EXCEPTION;
-				else
-					exc = GP_EXCEPTION;
-				goto err;
-			}
-			break;
-
-		default:
-			if (offset > sd->u.seg.limit) {
-				if (idx == CPU_SS_INDEX)
-					exc = SS_EXCEPTION;
-				else
-					exc = GP_EXCEPTION;
-				goto err;
-			}
-			break;
-		}
-	}
-	addr = CPU_STAT_SREGBASE(idx) + offset;
-	if (!CPU_STAT_PM)
-		return cpu_memoryread(addr);
-	return cpu_lmemoryread(addr, CPU_STAT_USER_MODE);
+	BYTE *p;
+	UINT32 v;
 
-err:
-	EXCEPTION(exc, 0);
-	return 0;	/* compiler happy */
+	CPU_PREFETCHQ_REMAIN -= 3;
+	p = CPU_PREFETCHQ + (address & CPU_PREFETCHQ_MASK);
+	v = LOADINTELWORD(p);
+	v += ((UINT32)p[2]) << 16;
+	return v;
 }
 
-UINT16 MEMCALL
-cpu_vmemoryread_w(int idx, UINT32 offset)
+INLINE static MEMCALL UINT32
+cpu_prefetchq_d(UINT32 address)
 {
-	descriptor_t *sd;
-	UINT32 addr;
-	int exc;
+	BYTE *p;
+	UINT32 v;
 
-	__ASSERT((unsigned int)idx < CPU_SEGREG_NUM);
+	CPU_PREFETCHQ_REMAIN -= 4;
+	p = CPU_PREFETCHQ + (address & CPU_PREFETCHQ_MASK);
+	v = LOADINTELDWORD(p);
+	return v;
+}
+#endif	/* IA32_SUPPORT_PREFETCH_QUEUE */
 
-	sd = &CPU_STAT_SREG(idx);
-	if (!sd->valid) {
-		exc = GP_EXCEPTION;
-		goto err;
-	}
+#if defined(IA32_SUPPORT_DEBUG_REGISTER)
+INLINE static void
+check_memory_break_point(UINT32 address, UINT length, UINT rw)
+{
+	int i;
 
-	if (!(sd->flag & CPU_DESC_FLAG_READABLE)) {
-		cpu_memoryread_check(sd, offset, 2,
-		    (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION);
-	} else {
-		switch (sd->type) {
-		case 4: case 5: case 6: case 7:
-			if (offset - 1 <= sd->u.seg.limit) {
-				if (idx == CPU_SS_INDEX)
-					exc = SS_EXCEPTION;
-				else
-					exc = GP_EXCEPTION;
-				goto err;
-			}
-			break;
+	if (CPU_STAT_BP && !(CPU_EFLAG & RF_FLAG)) {
+		for (i = 0; i < CPU_DEBUG_REG_INDEX_NUM; i++) {
+			if ((CPU_STAT_BP & (1 << i))
+			 && (CPU_DR7_GET_RW(i) & rw)
-		default:
-			if (offset > sd->u.seg.limit - 1) {
-				if (idx == CPU_SS_INDEX)
-					exc = SS_EXCEPTION;
-				else
-					exc = GP_EXCEPTION;
-				goto err;
+			 && ((address <= CPU_DR(i) && address + length > CPU_DR(i))
+			  || (address > CPU_DR(i) && address < CPU_DR(i) + CPU_DR7_GET_LEN(i)))) {
+				CPU_STAT_BP_EVENT |= CPU_STAT_BP_EVENT_B(i);
 			}
-			break;
 		}
-	}
-	addr = CPU_STAT_SREGBASE(idx) + offset;
-	if (!CPU_STAT_PM)
-		return cpu_memoryread_w(addr);
-	return cpu_lmemoryread_w(addr, CPU_STAT_USER_MODE);
-
-err:
-	EXCEPTION(exc, 0);
-	return 0;	/* compiler happy */
+	}
 }
+#else
+#define	check_memory_break_point(address, length, rw)
+#endif
 
-UINT32 MEMCALL
-cpu_vmemoryread_d(int idx, UINT32 offset)
+/*
+ * code fetch
+ */
+UINT8 MEMCALL
+cpu_codefetch(UINT32 offset)
 {
 	descriptor_t *sd;
 	UINT32 addr;
-	int exc;
-
-	__ASSERT((unsigned int)idx < CPU_SEGREG_NUM);
-
-	sd = &CPU_STAT_SREG(idx);
-	if (!sd->valid) {
-		exc = GP_EXCEPTION;
-		goto err;
-	}
-	if (!(sd->flag & CPU_DESC_FLAG_READABLE)) {
-		cpu_memoryread_check(sd, offset, 4,
-		    (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION);
-	} else {
-		switch (sd->type) {
-		case 4: case 5: case 6: case 7:
-			if (offset - 3 <= sd->u.seg.limit) {
-				if (idx == CPU_SS_INDEX)
-					exc = SS_EXCEPTION;
-				else
-					exc = GP_EXCEPTION;
-				goto err;
-			}
-			break;
-
-		default:
-			if (offset > sd->u.seg.limit - 3) {
-				if (idx == CPU_SS_INDEX)
-					exc = SS_EXCEPTION;
-				else
-					exc = GP_EXCEPTION;
-				goto err;
-			}
-			break;
-		}
+	sd = &CPU_STAT_SREG(CPU_CS_INDEX);
+	if (offset <= sd->u.seg.limit) {
+		addr = sd->u.seg.segbase + offset;
+#if defined(IA32_SUPPORT_PREFETCH_QUEUE)
+		if (CPU_PREFETCHQ_REMAIN <= 0) {
+			cpu_prefetch(addr);
+		}
+		return cpu_prefetchq(addr);
+#else	/* !IA32_SUPPORT_PREFETCH_QUEUE */
+		if (!CPU_STAT_PAGING)
+			return cpu_memoryread(addr);
+		return cpu_linear_memory_read_b(addr, CPU_PAGE_READ_CODE | CPU_STAT_USER_MODE);
+#endif	/* IA32_SUPPORT_PREFETCH_QUEUE */
 	}
-	addr = CPU_STAT_SREGBASE(idx) + offset;
-	if (!CPU_STAT_PM)
-		return cpu_memoryread_d(addr);
-	return cpu_lmemoryread_d(addr, CPU_STAT_USER_MODE);
-
-err:
-	EXCEPTION(exc, 0);
+	EXCEPTION(GP_EXCEPTION, 0);
 	return 0;	/* compiler happy */
 }
 
-/* vaddr memory write */
-void MEMCALL
-cpu_vmemorywrite(int idx, UINT32 offset, UINT8 val)
+UINT16 MEMCALL
+cpu_codefetch_w(UINT32 offset)
 {
 	descriptor_t *sd;
 	UINT32 addr;
-	int exc;
-
-	__ASSERT((unsigned int)idx < CPU_SEGREG_NUM);
-
-	sd = &CPU_STAT_SREG(idx);
-	if (!sd->valid) {
-		exc = GP_EXCEPTION;
-		goto err;
-	}
-
-	if (!(sd->flag & CPU_DESC_FLAG_WRITABLE)) {
-		cpu_memorywrite_check(sd, offset, 1,
-		    (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION);
-	} else {
-		switch (sd->type) {
-		case 6: case 7:
-			if (offset <= sd->u.seg.limit) {
-				if (idx == CPU_SS_INDEX)
-					exc = SS_EXCEPTION;
-				else
-					exc = GP_EXCEPTION;
-				goto err;
-			}
-			break;
+#if defined(IA32_SUPPORT_PREFETCH_QUEUE)
+	UINT16 v;
+#endif
 
-		default:
-			if (offset > sd->u.seg.limit) {
-				if (idx == CPU_SS_INDEX)
-					exc = SS_EXCEPTION;
-				else
-					exc = GP_EXCEPTION;
-				goto err;
-			}
-			break;
-		}
-	}
-	addr = CPU_STAT_SREGBASE(idx) + offset;
-	if (!CPU_STAT_PM) {
-		/* real mode */
-		cpu_memorywrite(addr, val);
-	} else {
-		/* protected mode */
-		cpu_lmemorywrite(addr, val, CPU_STAT_USER_MODE);
+	sd = &CPU_STAT_SREG(CPU_CS_INDEX);
+	if (offset <= sd->u.seg.limit - 1) {
+		addr = sd->u.seg.segbase + offset;
+#if defined(IA32_SUPPORT_PREFETCH_QUEUE)
+		if (CPU_PREFETCHQ_REMAIN <= 0) {
+			cpu_prefetch(addr);
+		}
+		if (CPU_PREFETCHQ_REMAIN >= 2) {
+			return cpu_prefetchq_w(addr);
+		}
+
+		v = cpu_prefetchq(addr);
+		addr++;
+		cpu_prefetch(addr);
+		v += (UINT16)cpu_prefetchq(addr) << 8;
+		return v;
+#else	/* !IA32_SUPPORT_PREFETCH_QUEUE */
+		if (!CPU_STAT_PAGING)
+			return cpu_memoryread_w(addr);
+		return cpu_linear_memory_read_w(addr, CPU_PAGE_READ_CODE | CPU_STAT_USER_MODE);
+#endif	/* IA32_SUPPORT_PREFETCH_QUEUE */
 	}
-	return;
-
-err:
-	EXCEPTION(exc, 0);
+	EXCEPTION(GP_EXCEPTION, 0);
+	return 0;	/* compiler happy */
 }
 
-void MEMCALL
-cpu_vmemorywrite_w(int idx, UINT32 offset, UINT16 val)
+UINT32 MEMCALL
+cpu_codefetch_d(UINT32 offset)
 {
 	descriptor_t *sd;
 	UINT32 addr;
-	int exc;
-
-	__ASSERT((unsigned int)idx < CPU_SEGREG_NUM);
-
-	sd = &CPU_STAT_SREG(idx);
-	if (!sd->valid) {
-		exc = GP_EXCEPTION;
-		goto err;
-	}
-
-	if (!(sd->flag & CPU_DESC_FLAG_WRITABLE)) {
-		cpu_memorywrite_check(sd, offset, 2,
-		    (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION);
-	} else {
-		switch (sd->type) {
-		case 6: case 7:
-			if (offset - 1 <= sd->u.seg.limit) {
-				if (idx == CPU_SS_INDEX)
-					exc = SS_EXCEPTION;
-				else
-					exc = GP_EXCEPTION;
-				goto err;
-			}
-			break;
+#if defined(IA32_SUPPORT_PREFETCH_QUEUE)
+	UINT32 v;
+#endif
 
-		default:
-			if (offset > sd->u.seg.limit - 1) {
-				if (idx == CPU_SS_INDEX)
-					exc = SS_EXCEPTION;
-				else
-					exc = GP_EXCEPTION;
-				goto err;
+	sd = &CPU_STAT_SREG(CPU_CS_INDEX);
+	if (offset <= sd->u.seg.limit - 3) {
+		addr = sd->u.seg.segbase + offset;
+#if defined(IA32_SUPPORT_PREFETCH_QUEUE)
+		if (CPU_PREFETCHQ_REMAIN <= 0) {
+			cpu_prefetch(addr);
+		}
+		if (CPU_PREFETCHQ_REMAIN >= 4) {
+			return cpu_prefetchq_d(addr);
+		} else {
+			switch (CPU_PREFETCHQ_REMAIN) {
+			case 1:
+				v = cpu_prefetchq(addr);
+				addr++;
+				cpu_prefetch(addr);
+				v += (UINT32)cpu_prefetchq_3(addr) << 8;
+				break;
+
+			case 2:
+				v = cpu_prefetchq_w(addr);
+				addr += 2;
+				cpu_prefetch(addr);
+				v += (UINT32)cpu_prefetchq_w(addr) << 16;
+				break;
+
+			case 3:
+				v = cpu_prefetchq_3(addr);
+				addr += 3;
+				cpu_prefetch(addr);
+				v += (UINT32)cpu_prefetchq(addr) << 24;
+				break;
+
+			default:
+				ia32_panic("cpu_codefetch_d: remain bytes is invalid");
+				v = 0;	/* compiler happy */
+				break;
 			}
-			break;
+			return v;
 		}
+#else	/* !IA32_SUPPORT_PREFETCH_QUEUE */
+		if (!CPU_STAT_PAGING)
+			return cpu_memoryread_d(addr);
+		return cpu_linear_memory_read_d(addr, CPU_PAGE_READ_CODE | CPU_STAT_USER_MODE);
+#endif	/* IA32_SUPPORT_PREFETCH_QUEUE */
 	}
-	addr = CPU_STAT_SREGBASE(idx) + offset;
-	if (!CPU_STAT_PM) {
-		/* real mode */
-		cpu_memorywrite_w(addr, val);
-	} else {
-		/* protected mode */
-		cpu_lmemorywrite_w(addr, val, CPU_STAT_USER_MODE);
-	}
-	return;
-
-err:
-	EXCEPTION(exc, 0);
+	EXCEPTION(GP_EXCEPTION, 0);
+	return 0;	/* compiler happy */
 }
 
-void MEMCALL
-cpu_vmemorywrite_d(int idx, UINT32 offset, UINT32 val)
-{
-	descriptor_t *sd;
-	UINT32 addr;
-	int exc;
-
-	__ASSERT((unsigned int)idx < CPU_SEGREG_NUM);
-
-	sd = &CPU_STAT_SREG(idx);
-	if (!sd->valid) {
-		exc = GP_EXCEPTION;
-		goto err;
-	}
-
-	if (!(sd->flag & CPU_DESC_FLAG_WRITABLE)) {
-		cpu_memorywrite_check(sd, offset, 4,
-		    (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION);
-	} else {
-		switch (sd->type) {
-		case 6: case 7:
-			if (offset - 3 <= sd->u.seg.limit) {
-				if (idx == CPU_SS_INDEX)
-					exc = SS_EXCEPTION;
-				else
-					exc = GP_EXCEPTION;
-				goto err;
-			}
-			break;
-		default:
-			if (offset > sd->u.seg.limit - 3) {
-				if (idx == CPU_SS_INDEX)
-					exc = SS_EXCEPTION;
-				else
-					exc = GP_EXCEPTION;
-				goto err;
-			}
-			break;
-		}
-	}
-	addr = CPU_STAT_SREGBASE(idx) + offset;
-	if (!CPU_STAT_PM) {
-		/* real mode */
-		cpu_memorywrite_d(addr, val);
-	} else {
-		/* protected mode */
-		cpu_lmemorywrite_d(addr, val, CPU_STAT_USER_MODE);
-	}
-	return;
+/*
+ * virtual address memory access functions
+ */
+#include "cpu_mem.mcr"
 
-err:
-	EXCEPTION(exc, 0);
-}
+VIRTUAL_ADDRESS_MEMORY_ACCESS_FUNCTION(b, UINT8, 1)
+VIRTUAL_ADDRESS_MEMORY_ACCESS_FUNCTION(w, UINT16, 2)
+VIRTUAL_ADDRESS_MEMORY_ACCESS_FUNCTION(d, UINT32, 4)
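
A note on the stack-limit checks in the first hunks: for an expand-up segment the conditions reduce to interval tests on byte offsets [0, seglimit]. A push of length bytes writes [esp - length, esp - 1]; a pop reads [esp, esp + length - 1]. The following stand-alone sketch (not part of the patch) restates just that core test; it deliberately ignores the esp == 0 wrap case, the 16-bit stack pointer, and the expand-down variant that the real cpu_stack_push_check/cpu_stack_pop_check also handle.

#include <assert.h>

/* expand-up stack: valid byte offsets are [0, seglimit] */
static int push_ok(unsigned int esp, unsigned int length, unsigned int seglimit)
{
	/* a push writes [esp - length, esp - 1] */
	return esp >= length && esp - 1 <= seglimit;
}

static int pop_ok(unsigned int esp, unsigned int length, unsigned int seglimit)
{
	/* a pop reads [esp, esp + length - 1] */
	return esp <= seglimit && (seglimit - esp) + 1 >= length;
}

int main(void)
{
	assert( push_ok(0x0100, 4, 0xffff));	/* writes 0xfc..0xff: in bounds */
	assert(!push_ok(0x0002, 4, 0xffff));	/* would underflow below offset 0 */
	assert( pop_ok(0xfffc, 4, 0xffff));	/* reads 0xfffc..0xffff: in bounds */
	assert(!pop_ok(0xfffd, 4, 0xffff));	/* read would run past the limit */
	return 0;
}

Failing either test is what now raises SS_EXCEPTION with the selector's RPL bits masked off (s & 0xfffc) instead of returning FALSE to the caller.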
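
The prefetch queue added under IA32_SUPPORT_PREFETCH_QUEUE fills from the fetch address up to the next CPU_PREFETCH_QUEUE_LENGTH boundary (the mask arithmetic requires a power-of-two length, whose actual value the diff does not show), then hands bytes out until CPU_PREFETCHQ_REMAIN runs dry and the next fetch triggers a refill. Below is a minimal stand-alone model of that behaviour; everything prefixed demo_ is hypothetical, the queue length of 16 is an arbitrary power of two, and the real code refills through cpu_memory_access_la_region() rather than a byte-at-a-time stub.

#include <stdio.h>

#define QUEUE_LEN	16			/* stand-in for CPU_PREFETCH_QUEUE_LENGTH */
#define QUEUE_MASK	(QUEUE_LEN - 1)		/* cf. CPU_PREFETCHQ_MASK */

static unsigned char demo_queue[QUEUE_LEN];
static int demo_remain;				/* cf. CPU_PREFETCHQ_REMAIN */

/* stub for the emulator's linear-memory code read */
static unsigned char demo_read_byte(unsigned int addr)
{
	return (unsigned char)(addr * 7u);	/* arbitrary deterministic pattern */
}

/* refill from 'address' up to the next QUEUE_LEN boundary, as cpu_prefetch() does */
static void demo_prefetch(unsigned int address)
{
	unsigned int offset = address & QUEUE_MASK;
	unsigned int length = QUEUE_LEN - offset;
	unsigned int i;

	for (i = 0; i < length; i++)
		demo_queue[offset + i] = demo_read_byte(address + i);
	demo_remain = (int)length;
}

/* consume one byte, as cpu_prefetchq() does */
static unsigned char demo_fetch(unsigned int address)
{
	demo_remain--;
	return demo_queue[address & QUEUE_MASK];
}

int main(void)
{
	unsigned int pc = 0x1234;		/* deliberately not boundary-aligned */
	int n;

	for (n = 0; n < 8; n++) {
		if (demo_remain <= 0)
			demo_prefetch(pc);
		printf("fetch 0x%x -> 0x%02x (remain %d)\n",
		    pc, (unsigned)demo_fetch(pc), demo_remain);
		pc++;
	}
	return 0;
}

The multi-byte fetchers in the patch handle a value that straddles a refill boundary by consuming the bytes still queued, refilling, and pasting the halves together; that is what the switch on CPU_PREFETCHQ_REMAIN in the new cpu_codefetch_d is doing for each possible leftover count of 1, 2, or 3 bytes.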
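
check_memory_break_point() flags a data breakpoint as hit when the access range [address, address + length) intersects the watched range [CPU_DR(i), CPU_DR(i) + CPU_DR7_GET_LEN(i)). The two-arm condition in the patch is a plain interval-overlap test; restated as a stand-alone predicate (not part of the patch, and ignoring address-wrap at the top of the address space):

#include <assert.h>

static int ranges_overlap(unsigned int addr, unsigned int len,
    unsigned int dr, unsigned int drlen)
{
	return (addr <= dr && addr + len > dr)		/* access starts at or before dr and reaches it */
	    || (addr > dr && addr < dr + drlen);	/* access starts inside the watched range */
}

int main(void)
{
	assert( ranges_overlap(0x1000, 4, 0x1002, 1));	/* dword covers the watched byte */
	assert( ranges_overlap(0x1003, 1, 0x1000, 4));	/* byte inside a 4-byte watch */
	assert(!ranges_overlap(0x1004, 2, 0x1000, 4));	/* adjacent ranges, no overlap */
	return 0;
}

Note that the real function also gates on CPU_STAT_BP (any breakpoint armed), the RF flag (which suppresses reporting for one instruction), and the per-register read/write kind via CPU_DR7_GET_RW(i) & rw before recording the hit in CPU_STAT_BP_EVENT.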