| version 1.7, 2011/12/29 13:32:12 | version 1.9, 2012/01/08 19:09:40 |
|---|---|
| Line 23 | Line 23 |
| * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| */ | */ |
| #define VIRTUAL_ADDRESS_MEMORY_ACCESS_FUNCTION(width, valtype, length) \ | #define DECLARE_VIRTUAL_ADDRESS_MEMORY_RW_FUNCTIONS(width, valtype, length) \ |
| valtype MEMCALL \ | valtype MEMCALL \ |
| cpu_vmemoryread_##width(int idx, UINT32 offset) \ | cpu_vmemoryread_##width(int idx, UINT32 offset) \ |
| { \ | { \ |
| Line 34 cpu_vmemoryread_##width(int idx, UINT32 | Line 34 cpu_vmemoryread_##width(int idx, UINT32 |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); \ | __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); \ |
| \ | \ |
| sdp = &CPU_STAT_SREG(idx); \ | sdp = &CPU_STAT_SREG(idx); \ |
| addr = sdp->u.seg.segbase + offset; \ | |
| \ | |
| if (!CPU_STAT_PM) \ | |
| return cpu_memoryread_##width(addr); \ | |
| \ | |
| if (!SEG_IS_VALID(sdp)) { \ | if (!SEG_IS_VALID(sdp)) { \ |
| exc = GP_EXCEPTION; \ | exc = GP_EXCEPTION; \ |
| goto err; \ | goto err; \ |
| } \ | } \ |
| \ | |
| if (!(sdp->flag & CPU_DESC_FLAG_READABLE)) { \ | if (!(sdp->flag & CPU_DESC_FLAG_READABLE)) { \ |
| cpu_memoryread_check(sdp, offset, (length), \ | cpu_memoryread_check(sdp, offset, (length), \ |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); \ | CHOOSE_EXCEPTION(idx)); \ |
| } else if (!(sdp->flag & CPU_DESC_FLAG_WHOLEADR)) { \ | } else if (!(sdp->flag & CPU_DESC_FLAG_WHOLEADR)) { \ |
| if (!check_limit_upstairs(sdp, offset, (length))) \ | if (!check_limit_upstairs(sdp, offset, (length))) \ |
| goto range_failure; \ | goto range_failure; \ |
| } \ | } \ |
| addr = sdp->u.seg.segbase + offset; \ | addr = sdp->u.seg.segbase + offset; \ |
| check_memory_break_point(addr, (length), CPU_DR7_RW_RO); \ |  |
| if (!CPU_STAT_PAGING) \ |  |
| return cpu_memoryread_##width(addr); \ |  |
| return cpu_linear_memory_read_##width(addr, CPU_PAGE_READ_DATA | CPU_STAT_USER_MODE); \ | return cpu_lmemoryread_##width(addr, CPU_PAGE_READ_DATA | CPU_STAT_USER_MODE); \ |
| \ | \ |
| range_failure: \ | range_failure: \ |
| VERBOSE(("cpu_vmemoryread_" #width ": type = %d, offset = %08x, length = %d, limit = %08x", sdp->type, offset, length, sdp->u.seg.limit)); \ | VERBOSE(("cpu_vmemoryread_" #width ": type = %d, offset = %08x, length = %d, limit = %08x", sdp->type, offset, length, sdp->u.seg.limit)); \ |
| exc = (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION; \ | exc = CHOOSE_EXCEPTION(idx); \ |
| err: \ | err: \ |
| EXCEPTION(exc, 0); \ | EXCEPTION(exc, 0); \ |
| return 0; /* compiler happy */ \ | return 0; /* compiler happy */ \ |
| Line 70 cpu_vmemorywrite_##width(int idx, UINT32 | Line 70 cpu_vmemorywrite_##width(int idx, UINT32 |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); \ | __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); \ |
| \ | \ |
| sdp = &CPU_STAT_SREG(idx); \ | sdp = &CPU_STAT_SREG(idx); \ |
| addr = sdp->u.seg.segbase + offset; \ | |
| \ | |
| if (!CPU_STAT_PM) { \ | |
| cpu_memorywrite_##width(addr, value); \ | |
| return; \ | |
| } \ | |
| \ | |
| if (!SEG_IS_VALID(sdp)) { \ | if (!SEG_IS_VALID(sdp)) { \ |
| exc = GP_EXCEPTION; \ | exc = GP_EXCEPTION; \ |
| goto err; \ | goto err; \ |
| } \ | } \ |
| \ | |
| if (!(sdp->flag & CPU_DESC_FLAG_WRITABLE)) { \ | if (!(sdp->flag & CPU_DESC_FLAG_WRITABLE)) { \ |
| cpu_memorywrite_check(sdp, offset, (length), \ | cpu_memorywrite_check(sdp, offset, (length), \ |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); \ | CHOOSE_EXCEPTION(idx)); \ |
| } else if (!(sdp->flag & CPU_DESC_FLAG_WHOLEADR)) { \ | } else if (!(sdp->flag & CPU_DESC_FLAG_WHOLEADR)) { \ |
| if (!check_limit_upstairs(sdp, offset, (length))) \ | if (!check_limit_upstairs(sdp, offset, (length))) \ |
| goto range_failure; \ | goto range_failure; \ |
| } \ | } \ |
| addr = sdp->u.seg.segbase + offset; \ | addr = sdp->u.seg.segbase + offset; \ |
| check_memory_break_point(addr, (length), CPU_DR7_RW_RW); \ |  |
| if (!CPU_STAT_PAGING) { \ |  |
| cpu_memorywrite_##width(addr, value); \ |  |
| } else { \ |  |
| cpu_linear_memory_write_##width(addr, value, CPU_PAGE_WRITE_DATA | CPU_STAT_USER_MODE); \ | cpu_lmemorywrite_##width(addr, value, CPU_PAGE_WRITE_DATA | CPU_STAT_USER_MODE); \ |
| } \ |  |
| return; \ | return; \ |
| \ | \ |
| range_failure: \ | range_failure: \ |
| VERBOSE(("cpu_vmemorywrite_" #width ": type = %d, offset = %08x, length = %d, limit = %08x", sdp->type, offset, length, sdp->u.seg.limit)); \ | VERBOSE(("cpu_vmemorywrite_" #width ": type = %d, offset = %08x, length = %d, limit = %08x", sdp->type, offset, length, sdp->u.seg.limit)); \ |
| exc = (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION; \ | exc = CHOOSE_EXCEPTION(idx); \ |
| err: \ | err: \ |
| EXCEPTION(exc, 0); \ | EXCEPTION(exc, 0); \ |
| } \ | } |
| \ | |
|  | #define DECLARE_VIRTUAL_ADDRESS_MEMORY_RMW_FUNCTIONS(width, valtype, length) \ |
| UINT32 MEMCALL \ | UINT32 MEMCALL \ |
| cpu_memory_access_va_RMW_##width(int idx, UINT32 offset, UINT32 (CPUCALL *func)(UINT32, void *), void *arg) \ | cpu_vmemory_RMW_##width(int idx, UINT32 offset, UINT32 (CPUCALL *func)(UINT32, void *), void *arg) \ |
| { \ | { \ |
| descriptor_t *sdp; \ | descriptor_t *sdp; \ |
| UINT32 addr; \ | UINT32 addr; \ |
| UINT32 res, dst; \ | UINT32 result; \ |
| valtype value; \ | |
| int exc; \ | int exc; \ |
| \ | \ |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); \ | __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); \ |
| \ | \ |
| sdp = &CPU_STAT_SREG(idx); \ | sdp = &CPU_STAT_SREG(idx); \ |
| addr = sdp->u.seg.segbase + offset; \ | |
| \ | |
| if (!CPU_STAT_PM) { \ | |
| value = cpu_memoryread_##width(addr); \ | |
| res = (*func)(value, arg); \ |  |
| cpu_memorywrite_##width(addr, res); \ |  |
| return value; \ | |
| } \ | |
| \ | |
| if (!SEG_IS_VALID(sdp)) { \ | if (!SEG_IS_VALID(sdp)) { \ |
| exc = GP_EXCEPTION; \ | exc = GP_EXCEPTION; \ |
| goto err; \ | goto err; \ |
| } \ | } \ |
| \ | |
| if (!(sdp->flag & CPU_DESC_FLAG_WRITABLE)) { \ | if (!(sdp->flag & CPU_DESC_FLAG_WRITABLE)) { \ |
| cpu_memorywrite_check(sdp, offset, (length), \ | cpu_memorywrite_check(sdp, offset, (length), \ |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); \ | CHOOSE_EXCEPTION(idx)); \ |
| } else if (!(sdp->flag & CPU_DESC_FLAG_WHOLEADR)) { \ | } else if (!(sdp->flag & CPU_DESC_FLAG_WHOLEADR)) { \ |
| if (!check_limit_upstairs(sdp, offset, (length))) \ | if (!check_limit_upstairs(sdp, offset, (length))) \ |
| goto range_failure; \ | goto range_failure; \ |
| } \ | } \ |
| addr = sdp->u.seg.segbase + offset; \ | addr = sdp->u.seg.segbase + offset; \ |
| check_memory_break_point(addr, (length), CPU_DR7_RW_RW); \ |  |
| if (!CPU_STAT_PAGING) { \ |  |
| dst = cpu_memoryread_##width(addr); \ |  |
| res = (*func)(dst, arg); \ |  |
| cpu_memorywrite_##width(addr, res); \ |  |
| } else { \ |  |
| dst = cpu_memory_access_la_RMW_##width(addr, func, arg); \ | return cpu_lmemory_RMW_##width(addr, func, arg); \ |
| } \ |  |
| return dst; \ |  |
| \ | \ |
| range_failure: \ | range_failure: \ |
| VERBOSE(("cpu_memory_access_va_RMW_" #width ": type = %d, offset = %08x, length = %d, limit = %08x", sdp->type, offset, length, sdp->u.seg.limit)); \ | VERBOSE(("cpu_vmemory_RMW_" #width ": type = %d, offset = %08x, length = %d, limit = %08x", sdp->type, offset, length, sdp->u.seg.limit)); \ |
| exc = (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION; \ | exc = CHOOSE_EXCEPTION(idx); \ |
| err: \ | err: \ |
| EXCEPTION(exc, 0); \ | EXCEPTION(exc, 0); \ |
| return 0; /* compiler happy */ \ | return 0; /* compiler happy */ \ |
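
Throughout the new revision, the repeated ternary `(idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION` is folded into a `CHOOSE_EXCEPTION(idx)` macro. Its definition is not part of this hunk; judging from the expression it replaces, it is presumably something like:

```c
/*
 * Presumed definition of CHOOSE_EXCEPTION (not shown in this diff).
 * A fault on a stack-segment access raises #SS; a fault on any other
 * segment raises #GP, matching the ternary the macro replaces.
 */
#define CHOOSE_EXCEPTION(idx) \
	(((idx) == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION)
```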
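The other change visible in all three functions: the real-mode shortcut (`!CPU_STAT_PM`), the debug-register breakpoint check, and the `CPU_STAT_PAGING` dispatch leave the virtual-address layer, which now always delegates to `cpu_lmemoryread_##width` / `cpu_lmemorywrite_##width` / `cpu_lmemory_RMW_##width`. Those linear-address entry points are outside this diff; here is a minimal sketch of the read side, assuming the deleted branches simply moved there (the macro name and the `ucrw` parameter name are placeholders, not taken from the source):

```c
/*
 * Sketch only, reconstructed from the branches deleted above; the
 * actual v1.9 implementation is not part of this diff.  With paging
 * disabled (which includes real mode, so the old !CPU_STAT_PM fast
 * path is still covered) the linear address is the physical address.
 */
#define DECLARE_LINEAR_ADDRESS_MEMORY_READ(width, valtype, length) \
valtype MEMCALL \
cpu_lmemoryread_##width(UINT32 addr, int ucrw) \
{ \
	/* debug registers watch linear addresses */ \
	check_memory_break_point(addr, (length), CPU_DR7_RW_RO); \
	if (!CPU_STAT_PAGING) \
		return cpu_memoryread_##width(addr); \
	return cpu_linear_memory_read_##width(addr, ucrw); \
}
```

The declare macros themselves would then be expanded once per access width, in the usual np2 style (the `b`/`w`/`d` suffixes match the `cpu_vmemoryread_b` family of names; the exact instantiation site is not shown here):

```c
DECLARE_VIRTUAL_ADDRESS_MEMORY_RW_FUNCTIONS(b, UINT8, 1)
DECLARE_VIRTUAL_ADDRESS_MEMORY_RW_FUNCTIONS(w, UINT16, 2)
DECLARE_VIRTUAL_ADDRESS_MEMORY_RW_FUNCTIONS(d, UINT32, 4)
```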