| version 1.16, 2004/03/24 14:34:23 | version 1.27, 2012/01/08 08:19:22 |
|---|---|
| Line 1 | Line 1 |
| /* $Id$ */ | |
| /* | /* |
| * Copyright (c) 2002-2004 NONAKA Kimihiro | * Copyright (c) 2002-2004 NONAKA Kimihiro |
| * All rights reserved. | * All rights reserved. |
| Line 12 | Line 10 |
| * 2. Redistributions in binary form must reproduce the above copyright | * 2. Redistributions in binary form must reproduce the above copyright |
| * notice, this list of conditions and the following disclaimer in the | * notice, this list of conditions and the following disclaimer in the |
| * documentation and/or other materials provided with the distribution. | * documentation and/or other materials provided with the distribution. |
| * 3. The name of the author may not be used to endorse or promote products | |
| * derived from this software without specific prior written permission. | |
| * | * |
| * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| Line 35 | Line 31 |
| /* | /* |
| * memory access check | * memory access check |
| */ | */ |
| void | static int MEMCALL check_limit_upstairs(descriptor_t *sdp, UINT32 offset, UINT len); |
| cpu_memoryread_check(descriptor_t *sd, UINT32 offset, UINT length, int e) | static void MEMCALL cpu_memoryread_check(descriptor_t *sdp, UINT32 offset, UINT len, int e); |
| | static void MEMCALL cpu_memorywrite_check(descriptor_t *sdp, UINT32 offset, UINT len, int e); |
| | static int MEMCALL |
| | check_limit_upstairs(descriptor_t *sdp, UINT32 offset, UINT len) |
| { | { |
| UINT32 uplimit; | UINT32 limit; |
| | UINT32 end; |
| if (CPU_STAT_PM) { | __ASSERT(sdp != NULL); |
| /* invalid */ | __ASSERT(len > 0); |
| if (!sd->valid) { | |
| VERBOSE(("cpu_memoryread_check: invalid")); | |
| EXCEPTION(GP_EXCEPTION, 0); | |
| } | |
| /* not present */ | len--; |
| if (!sd->p) { | end = offset + len; |
| VERBOSE(("cpu_memoryread_check: not present")); | limit = SEG_IS_32BIT(sdp) ? 0xffffffff : 0x0000ffff; |
| EXCEPTION(e, 0); | |
| | if (SEG_IS_DATA(sdp) && SEG_IS_EXPANDDOWN_DATA(sdp)) { |
| | /* expand-down data segment */ |
| | if (sdp->u.seg.limit == 0) { |
| | /* |
| | * 32bit 16bit |
| | * +-------+ +-------+ FFFFFFFFh |
| | * | | | | |
| | * | | + [1] + 0000FFFFh |
| | * | valid | | | |
| | * | | +-------+ 0000FFFFh - len -1 |
| | * | | | valid | |
| | * +-------+ +-------+ 00000000h |
| | */ |
| | if (!SEG_IS_32BIT(sdp)) { |
| | if ((len > limit) /* len check */ |
| | || (end > limit)) { /* [1] */ |
| | return 0; |
| | } |
| | } else { |
| | sdp->flag |= CPU_DESC_FLAG_WHOLEADR; |
| | } |
| | } else { |
| | /* |
| | * 32bit 16bit |
| | * +-------+ +-------+ FFFFFFFFh |
| | * | [2] | | | |
| | * +-------+ +.......+ FFFFFFFFh - len - 1 |
| | * | | | [2] | |
| | * | | +.......+ 0000FFFFh |
| | * | valid | | | |
| | * | | +-------+ 0000FFFFh - len - 1 |
| | * | | | valid | |
| | * +-------+ +-------+ seg.limit |
| | * | [1] | | [1] | |
| | * +-------+ +-------+ 00000000h |
| | */ |
| | if ((len > limit - sdp->u.seg.limit) /* len check */ |
| | || (end < offset) /* wrap check */ |
| | || (offset < sdp->u.seg.limit) /* [1] */ |
| | || (end > limit)) { /* [2] */ |
| | return 0; |
| | } |
| | } |
| | } else { |
| | /* expand-up data or code segment */ |
| | if (sdp->u.seg.limit == limit) { |
| | /* |
| | * 32bit 16bit |
| | * +-------+ +-------+ FFFFFFFFh |
| | * | | | | |
| | * | | + [1] + 0000FFFFh |
| | * | valid | | | |
| | * | | +-------+ 0000FFFFh - len - 1 |
| | * | | | valid | |
| | * +-------+ +-------+ 00000000h |
| | */ |
| | if (!SEG_IS_32BIT(sdp)) { |
| | if ((len > limit) /* len check */ |
| | || (offset + len > limit)) { /* [1] */ |
| | return 0; |
| | } |
| | } else { |
| | sdp->flag |= CPU_DESC_FLAG_WHOLEADR; |
| | } |
| | } else { |
| | /* |
| | * 32bit 16bit |
| | * +-------+ +-------+ FFFFFFFFh |
| | * | | | | |
| | * | | +.......+ 0000FFFFh |
| | * | [1] | | [1] | |
| | * +.......+ +.......+ seg.limit |
| | * | | | | |
| | * +-------+ +-------+ seg.limit - len - 1 |
| | * | valid | | valid | |
| | * +-------+ +-------+ 00000000h |
| | */ |
| | if ((len > sdp->u.seg.limit) /* len check */ |
| | || (end < offset) /* wrap check */ |
| | || (end > sdp->u.seg.limit)) { /* [1] */ |
| | return 0; |
| | } |
| } | } |
| } | } |
| | return 1; /* Ok! */ |
| | } |
| | static void MEMCALL |
| | cpu_memoryread_check(descriptor_t *sdp, UINT32 offset, UINT len, int e) |
| | { |
| switch (sd->type) { | __ASSERT(sdp != NULL); |
| | __ASSERT(len > 0); |
| | if (!SEG_IS_VALID(sdp)) { |
| | e = GP_EXCEPTION; |
| | goto exc; |
| | } |
| | if (!SEG_IS_PRESENT(sdp) |
| | || SEG_IS_SYSTEM(sdp) |
| | || (SEG_IS_CODE(sdp) && !SEG_IS_READABLE_CODE(sdp))) { |
| | goto exc; |
| | } |
| | switch (sdp->type) { |
| case 0: case 1: /* ro */ | case 0: case 1: /* ro */ |
| case 2: case 3: /* rw */ | case 2: case 3: /* rw */ |
| case 10: case 11: /* rx */ | |
| case 14: case 15: /* rxc */ | |
| if (offset > sd->u.seg.limit - length + 1) { | |
| VERBOSE(("cpu_memoryread_check: offset(%08x) > sd->u.seg.limit(%08x) - length(%08x) + 1", offset, sd->u.seg.limit, length)); | |
| EXCEPTION(e, 0); | |
| } | |
| if (length - 1 > sd->u.seg.limit) { | |
| VERBOSE(("cpu_memoryread_check: length(%08x) - 1 > sd->u.seg.limit(%08x)", length, sd->u.seg.limit)); | |
| EXCEPTION(e, 0); | |
| } | |
| break; | |
| case 4: case 5: /* ro (expand down) */ | case 4: case 5: /* ro (expand down) */ |
| case 6: case 7: /* rw (expand down) */ | case 6: case 7: /* rw (expand down) */ |
| uplimit = sd->d ? 0xffffffff : 0x0000ffff; | case 10: case 11: /* rx */ |
| if (offset <= sd->u.seg.limit) { | case 14: case 15: /* rxc */ |
| VERBOSE(("cpu_memoryread_check: offset(%08x) <= sd->u.seg.limit(%08x)", offset, sd->u.seg.limit)); | if (!check_limit_upstairs(sdp, offset, len)) |
| EXCEPTION(e, 0); | goto exc; |
| } | |
| if (offset > uplimit) { | |
| VERBOSE(("cpu_memoryread_check: offset(%08x) > uplimit(%08x)", offset, uplimit)); | |
| EXCEPTION(e, 0); | |
| } | |
| if (uplimit - offset < length - 1) { | |
| VERBOSE(("cpu_memoryread_check: uplimit(%08x) - offset(%08x) < length(%08x) - 1", uplimit, offset, length)); | |
| EXCEPTION(e, 0); | |
| } | |
| break; | break; |
| default: | default: |
| VERBOSE(("cpu_memoryread_check: invalid type (type = %d)", sd->type)); | goto exc; |
| EXCEPTION(e, 0); | |
| break; | |
| } | } |
| sd->flag |= CPU_DESC_FLAG_READABLE; | sdp->flag |= CPU_DESC_FLAG_READABLE; |
| | return; |
| | exc: |
| | VERBOSE(("cpu_memoryread_check: check failure.")); |
| | VERBOSE(("offset = 0x%08x, len = %d", offset, len)); |
| | #if defined(DEBUG) |
| | segdesc_dump(sdp); |
| | #endif |
| | EXCEPTION(e, 0); |
| } | } |
| void | static void MEMCALL |
| cpu_memorywrite_check(descriptor_t *sd, UINT32 offset, UINT length, int e) | cpu_memorywrite_check(descriptor_t *sdp, UINT32 offset, UINT len, int e) |
| { | { |
| UINT32 uplimit; | |
| if (CPU_STAT_PM) { | |
| /* invalid */ | |
| if (!sd->valid) { | |
| VERBOSE(("cpu_memorywrite_check: invalid")); | |
| EXCEPTION(GP_EXCEPTION, 0); | |
| } | |
| /* not present */ | __ASSERT(sdp != NULL); |
| if (!sd->p) { | __ASSERT(len > 0); |
| VERBOSE(("cpu_memorywrite_check: not present")); | |
| EXCEPTION(e, 0); | |
| } | |
| if (!sd->s) { | if (!SEG_IS_VALID(sdp)) { |
| VERBOSE(("cpu_memorywrite_check: system segment")); | e = GP_EXCEPTION; |
| EXCEPTION(e, 0); | goto exc; |
| } | } |
| | if (!SEG_IS_PRESENT(sdp) |
| | || SEG_IS_SYSTEM(sdp) |
| | || SEG_IS_CODE(sdp) |
| | || (SEG_IS_DATA(sdp) && !SEG_IS_WRITABLE_DATA(sdp))) { |
| | goto exc; |
| } | } |
| switch (sd->type) { | switch (sdp->type) { |
| case 2: case 3: /* rw */ | case 2: case 3: /* rw */ |
| if (offset > sd->u.seg.limit - length + 1) { | |
| VERBOSE(("cpu_memorywrite_check: offset(%08x) > sd->u.seg.limit(%08x) - length(%08x) + 1", offset, sd->u.seg.limit, length)); | |
| EXCEPTION(e, 0); | |
| } | |
| if (length - 1 > sd->u.seg.limit) { | |
| VERBOSE(("cpu_memorywrite_check: length(%08x) - 1 > sd->u.seg.limit(%08x)", length, sd->u.seg.limit)); | |
| EXCEPTION(e, 0); | |
| } | |
| break; | |
| case 6: case 7: /* rw (expand down) */ | case 6: case 7: /* rw (expand down) */ |
| uplimit = sd->d ? 0xffffffff : 0x0000ffff; | if (!check_limit_upstairs(sdp, offset, len)) |
| if (offset <= sd->u.seg.limit) { | goto exc; |
| VERBOSE(("cpu_memorywrite_check: offset(%08x) <= sd->u.seg.limit(%08x)", offset, sd->u.seg.limit)); | |
| EXCEPTION(e, 0); | |
| } | |
| if (offset > uplimit) { | |
| VERBOSE(("cpu_memorywrite_check: offset(%08x) > uplimit(%08x)", offset, uplimit)); | |
| EXCEPTION(e, 0); | |
| } | |
| if (uplimit - offset < length - 1) { | |
| VERBOSE(("cpu_memorywrite_check: uplimit(%08x) - offset(%08x) < length(%08x) - 1", uplimit, offset, length)); | |
| EXCEPTION(e, 0); | |
| } | |
| break; | break; |
| default: | default: |
| VERBOSE(("cpu_memorywrite_check: invalid type (type = %d)", sd->type)); | goto exc; |
| EXCEPTION(e, 0); | |
| break; | |
| } | } |
| sd->flag |= CPU_DESC_FLAG_WRITABLE; | sdp->flag |= CPU_DESC_FLAG_WRITABLE | CPU_DESC_FLAG_READABLE; |
| | return; |
| | exc: |
| | VERBOSE(("cpu_memorywrite_check: check failure.")); |
| | VERBOSE(("offset = 0x%08x, len = %d", offset, len)); |
| | #if defined(DEBUG) |
| | segdesc_dump(sdp); |
| | #endif |
| | EXCEPTION(e, 0); |
| } | } |
| void | void MEMCALL |
| cpu_stack_push_check(UINT16 s, descriptor_t *sd, UINT32 esp, UINT length) | cpu_stack_push_check(UINT16 s, descriptor_t *sdp, UINT32 sp, UINT len) |
| { | { |
| UINT32 limit; | UINT32 limit; |
| | UINT32 start; |
| if (CPU_STAT_PM) { | __ASSERT(sdp != NULL); |
| if (!sd->valid || !sd->p) { | __ASSERT(len > 0); |
| VERBOSE(("cpu_stack_push_check: valid = %d, present = %d", sd->valid, sd->p)); | |
| EXCEPTION(SS_EXCEPTION, s & 0xfffc); | |
| } | |
| if (!sd->s || sd->u.seg.c || !sd->u.seg.wr) { | |
| VERBOSE(("cpu_stack_push_check: s = %d, c = %d, wr", sd->s, sd->u.seg.c, sd->u.seg.wr)); | |
| EXCEPTION(SS_EXCEPTION, s & 0xfffc); | |
| } | |
| if (!sd->d) { | if (!SEG_IS_VALID(sdp) |
| limit = 0xffff; | || !SEG_IS_PRESENT(sdp) |
| } else { | || SEG_IS_SYSTEM(sdp) |
| limit = 0xffffffff; | || SEG_IS_CODE(sdp) |
| } | || !SEG_IS_WRITABLE_DATA(sdp)) { |
| if (sd->u.seg.ec) { | goto exc; |
| /* expand-down stack */ | } |
| if ((esp == 0) | |
| || (esp < length) | len--; |
| || (esp - length <= sd->u.seg.limit) | start = sp - len; |
| || (esp > limit)) { | limit = SEG_IS_32BIT(sdp) ? 0xffffffff : 0x0000ffff; |
| VERBOSE(("cpu_stack_push_check: expand-down, esp = %08x, length = %08x", esp, length)); | |
| VERBOSE(("cpu_stack_push_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit)); | if (SEG_IS_EXPANDDOWN_DATA(sdp)) { |
| VERBOSE(("cpu_stack_push_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend)); | /* expand-down stack */ |
| EXCEPTION(SS_EXCEPTION, s & 0xfffc); | if (!SEG_IS_32BIT(sdp)) { |
| | if (sp > limit) { /* [*] */ |
| | goto exc; |
| } | } |
| } else { | } |
| /* expand-up stack */ | if (sdp->u.seg.limit == 0) { |
| if (esp == 0) { | /* |
| if ((sd->d && (sd->u.seg.segend != 0xffffffff)) | * 32bit 16bit |
| || (!sd->d && (sd->u.seg.segend != 0xffff))) { | * +-------+ +-------+ FFFFFFFFh |
| VERBOSE(("cpu_stack_push_check: expand-up, esp = %08x, length = %08x", esp, length)); | * | | | [*] | |
| VERBOSE(("cpu_stack_push_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit)); | * | | +-------+ 0000FFFFh |
| VERBOSE(("cpu_stack_push_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend)); | * | valid | | | |
| EXCEPTION(SS_EXCEPTION, s & 0xfffc); | * | | | valid | |
| | * | | | | |
| | * +-------+ +-------+ 00000000h |
| | */ |
| | if (!SEG_IS_32BIT(sdp)) { |
| | if (sp > limit) { /* [1] */ |
| | goto exc; |
| } | } |
| } else { | } else { |
| if ((esp < length) | sdp->flag |= CPU_DESC_FLAG_WHOLEADR; |
| || (esp - 1 > sd->u.seg.limit)) { | |
| VERBOSE(("cpu_stack_push_check: expand-up, esp = %08x, length = %08x", esp, length)); | |
| VERBOSE(("cpu_stack_push_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit)); | |
| VERBOSE(("cpu_stack_push_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend)); | |
| EXCEPTION(SS_EXCEPTION, s & 0xfffc); | |
| } | |
| } | } |
| } | |
| } | |
| } | |
| void | |
| cpu_stack_pop_check(UINT16 s, descriptor_t *sd, UINT32 esp, UINT length) | |
| { | |
| UINT32 limit; | |
| if (CPU_STAT_PM) { | |
| if (!sd->valid || !sd->p) { | |
| VERBOSE(("cpu_stack_pop_check: valid = %d, present = %d", sd->valid, sd->p)); | |
| EXCEPTION(SS_EXCEPTION, s & 0xfffc); | |
| } | |
| if (!sd->s || sd->u.seg.c || !sd->u.seg.wr) { | |
| VERBOSE(("cpu_stack_pop_check: s = %d, c = %d, wr", sd->s, sd->u.seg.c, sd->u.seg.wr)); | |
| EXCEPTION(SS_EXCEPTION, s & 0xfffc); | |
| } | |
| if (!sd->d) { | |
| limit = 0xffff; | |
| } else { | } else { |
| limit = 0xffffffff; | /* |
| * 32bit 16bit | |
| * +-------+ +-------+ FFFFFFFFh | |
| * | | | [*] | | |
| * | valid | +-------+ 0000FFFFh | |
| * | | | valid | | |
| * +-------+ +-------+ seg.limit + len - 1 | |
| * | | | | | |
| * +..[1]..+ +..[1]..+ seg.limit | |
| * | | | | | |
| * +-------+ +-------+ 00000000h | |
| */ | |
| if ((len > limit - sdp->u.seg.limit) /* len check */ | |
| || (start > sp) /* wrap check */ | |
| || (start < sdp->u.seg.limit)) { /* [1] */ | |
| goto exc; | |
| } | |
| } | } |
| if (sd->u.seg.ec) { | } else { |
| /* expand-down stack */ | /* expand-up stack */ |
| if ((esp == limit) | if (sdp->u.seg.limit == limit) { |
| || ((limit - esp) + 1 < length)) { | /* |
| VERBOSE(("cpu_stack_pop_check: expand-up, esp = %08x, length = %08x", esp, length)); | * 32bit 16bit |
| VERBOSE(("cpu_stack_pop_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit)); | * +-------+ +-------+ FFFFFFFFh |
| VERBOSE(("cpu_stack_pop_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend)); | * | | | [1] | |
| EXCEPTION(SS_EXCEPTION, s & 0xfffc); | * | | +-------+ 0000FFFFh |
| * | valid | | | | |
| * | | | valid | | |
| * | | | | | |
| * +-------+ +-------+ 00000000h | |
| */ | |
| if (!SEG_IS_32BIT(sdp)) { | |
| if (sp > limit) { /* [1] */ | |
| goto exc; | |
| } | |
| } else { | |
| sdp->flag |= CPU_DESC_FLAG_WHOLEADR; | |
| } | } |
| } else { | } else { |
| /* expand-up stack */ | /* |
| if ((esp == limit) | * 32bit 16bit |
| || (sd->u.seg.segend == 0) | * +-------+ +-------+ FFFFFFFFh |
| || (esp > sd->u.seg.limit) | * | | | | |
| || ((sd->u.seg.limit - esp) + 1 < length)) { | * | [1] | + [1] + 0000FFFFh |
| VERBOSE(("cpu_stack_pop_check: expand-up, esp = %08x, length = %08x", esp, length)); | * | | | | |
| VERBOSE(("cpu_stack_pop_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit)); | * +-------+ +-------+ seg.limit |
| VERBOSE(("cpu_stack_pop_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend)); | * | valid | | valid | |
| EXCEPTION(SS_EXCEPTION, s & 0xfffc); | * +.......+ +.......+ len - 1 |
| * | [+] | | [+] | | |
| * +-------+ +-------+ 00000000h | |
| * | |
| * [+]: wrap check | |
| */ | |
| if ((len > sdp->u.seg.limit) /* len check */ | |
| || (start > sp) /* wrap check */ | |
| || (sp > sdp->u.seg.limit + 1)) { /* [1] */ | |
| goto exc; | |
| } | } |
| } | } |
| } | } |
| } | return; |
| #if defined(IA32_SUPPORT_PREFETCH_QUEUE) | |
| /* | |
| * code prefetch | |
| */ | |
| #define CPU_PREFETCHQ_MASK (CPU_PREFETCH_QUEUE_LENGTH - 1) | |
| INLINE static MEMCALL void | exc: |
| cpu_prefetch(UINT32 address) | VERBOSE(("cpu_stack_push_check: check failure.")); |
| { | VERBOSE(("cpu_stack_push_check: selector = %04x, sp = 0x%08x, len = %d", s, sp, len + 1)); |
| UINT offset = address & CPU_PREFETCHQ_MASK; | #if defined(DEBUG) |
| UINT length = CPU_PREFETCH_QUEUE_LENGTH - offset; | segdesc_dump(sdp); |
| #endif | |
| cpu_memory_access_la_region(address, length, CPU_PAGE_READ_CODE|CPU_STAT_USER_MODE, CPU_PREFETCHQ + offset); | EXCEPTION(SS_EXCEPTION, s & 0xfffc); |
| CPU_PREFETCHQ_REMAIN = (SINT8)length; | |
| } | |
| INLINE static MEMCALL UINT8 | |
| cpu_prefetchq(UINT32 address) | |
| { | |
| UINT8 v; | |
| CPU_PREFETCHQ_REMAIN--; | |
| v = CPU_PREFETCHQ[address & CPU_PREFETCHQ_MASK]; | |
| return v; | |
| } | } |
| INLINE static MEMCALL UINT16 | void MEMCALL |
| cpu_prefetchq_w(UINT32 address) | cpu_stack_pop_check(UINT16 s, descriptor_t *sdp, UINT32 sp, UINT len) |
| { | { |
| BYTE *p; | |
| UINT16 v; | |
| CPU_PREFETCHQ_REMAIN -= 2; | __ASSERT(sdp != NULL); |
| p = CPU_PREFETCHQ + (address & CPU_PREFETCHQ_MASK); | __ASSERT(len > 0); |
| v = LOADINTELWORD(p); | |
| return v; | |
| } | |
| INLINE static MEMCALL UINT32 | if (!SEG_IS_VALID(sdp) |
| cpu_prefetchq_3(UINT32 address) | || !SEG_IS_PRESENT(sdp) |
| { | || SEG_IS_SYSTEM(sdp) |
| BYTE *p; | || SEG_IS_CODE(sdp) |
| UINT32 v; | || !SEG_IS_WRITABLE_DATA(sdp)) { |
| goto exc; | |
| CPU_PREFETCHQ_REMAIN -= 3; | } |
| p = CPU_PREFETCHQ + (address & CPU_PREFETCHQ_MASK); | |
| v = LOADINTELWORD(p); | |
| v += ((UINT32)p[2]) << 16; | |
| return v; | |
| } | |
| INLINE static MEMCALL UINT32 | |
| cpu_prefetchq_d(UINT32 address) | |
| { | |
| BYTE *p; | |
| UINT32 v; | |
| CPU_PREFETCHQ_REMAIN -= 4; | if (!check_limit_upstairs(sdp, sp, len)) |
| p = CPU_PREFETCHQ + (address & CPU_PREFETCHQ_MASK); | goto exc; |
| v = LOADINTELDWORD(p); | return; |
| return v; | |
| exc: | |
| VERBOSE(("cpu_stack_pop_check: check failure.")); | |
| VERBOSE(("s = 0x%04x, sp = 0x%08x, len = %d", s, sp, len)); | |
| #if defined(DEBUG) | |
| segdesc_dump(sdp); | |
| #endif | |
| EXCEPTION(SS_EXCEPTION, s & 0xfffc); | |
| } | } |
| #endif /* IA32_SUPPORT_PREFETCH_QUEUE */ | |
| #if defined(IA32_SUPPORT_DEBUG_REGISTER) | #if defined(IA32_SUPPORT_DEBUG_REGISTER) |
| INLINE static void | static INLINE void |
| check_memory_break_point(UINT32 address, UINT length, UINT rw) | check_memory_break_point(UINT32 address, UINT length, UINT rw) |
| { | { |
| int i; | int i; |
| Line 339 check_memory_break_point(UINT32 address, | Line 386 check_memory_break_point(UINT32 address, |
| #define check_memory_break_point(address, length, rw) | #define check_memory_break_point(address, length, rw) |
| #endif | #endif |
| /* | /* |
| * code fetch | * code fetch |
| */ | */ |
| | #define ucrw (CPU_PAGE_READ_CODE | CPU_STAT_USER_MODE) |
| UINT8 MEMCALL | UINT8 MEMCALL |
| cpu_codefetch(UINT32 offset) | cpu_codefetch(UINT32 offset) |
| { | { |
| descriptor_t *sd; | descriptor_t *sdp; |
| UINT32 addr; | UINT32 addr; |
| TLB_ENTRY_T *ep; | |
| sd = &CPU_STAT_SREG(CPU_CS_INDEX); | sdp = &CPU_CS_DESC; |
| if (offset <= sd->u.seg.limit) { | if (offset <= sdp->u.seg.limit) { |
| addr = sd->u.seg.segbase + offset; | addr = sdp->u.seg.segbase + offset; |
| #if defined(IA32_SUPPORT_PREFETCH_QUEUE) | |
| if (CPU_PREFETCHQ_REMAIN <= 0) { | |
| cpu_prefetch(addr); | |
| } | |
| return cpu_prefetchq(addr); | |
| #else /* !IA32_SUPPORT_PREFETCH_QUEUE */ | |
| if (!CPU_STAT_PAGING) | if (!CPU_STAT_PAGING) |
| return cpu_memoryread(addr); | return cpu_memoryread(addr); |
| return cpu_lcmemoryread(addr); | ep = tlb_lookup(addr, ucrw); |
| #endif /* IA32_SUPPORT_PREFETCH_QUEUE */ | if (ep != NULL && ep->memp != NULL) { |
| return ep->memp[addr & 0xfff]; | |
| } | |
| return cpu_linear_memory_read_b(addr, ucrw); | |
| } | } |
| EXCEPTION(GP_EXCEPTION, 0); | EXCEPTION(GP_EXCEPTION, 0); |
| return 0; /* compiler happy */ | return 0; /* compiler happy */ |
| Line 369 cpu_codefetch(UINT32 offset) | Line 417 cpu_codefetch(UINT32 offset) |
| UINT16 MEMCALL | UINT16 MEMCALL |
| cpu_codefetch_w(UINT32 offset) | cpu_codefetch_w(UINT32 offset) |
| { | { |
| descriptor_t *sd; | descriptor_t *sdp; |
| UINT32 addr; | UINT32 addr; |
| #if defined(IA32_SUPPORT_PREFETCH_QUEUE) | TLB_ENTRY_T *ep; |
| UINT16 v; | UINT16 value; |
| #endif | |
| sd = &CPU_STAT_SREG(CPU_CS_INDEX); | |
| if (offset <= sd->u.seg.limit - 1) { | |
| addr = sd->u.seg.segbase + offset; | |
| #if defined(IA32_SUPPORT_PREFETCH_QUEUE) | |
| if (CPU_PREFETCHQ_REMAIN <= 0) { | |
| cpu_prefetch(addr); | |
| } | |
| if (CPU_PREFETCHQ_REMAIN >= 2) { | |
| return cpu_prefetchq_w(addr); | |
| } | |
| v = cpu_prefetchq(addr); | sdp = &CPU_CS_DESC; |
| addr++; | if (offset <= sdp->u.seg.limit - 1) { |
| cpu_prefetch(addr); | addr = sdp->u.seg.segbase + offset; |
| v += (UINT16)cpu_prefetchq(addr) << 8; | |
| return v; | |
| #else /* !IA32_SUPPORT_PREFETCH_QUEUE */ | |
| if (!CPU_STAT_PAGING) | if (!CPU_STAT_PAGING) |
| return cpu_memoryread_w(addr); | return cpu_memoryread_w(addr); |
| return cpu_lcmemoryread_w(addr); | ep = tlb_lookup(addr, ucrw); |
| #endif /* IA32_SUPPORT_PREFETCH_QUEUE */ | if (ep != NULL && ep->memp != NULL) { |
| if ((addr + 1) & 0x00000fff) { | |
| return LOADINTELWORD(ep->memp + (addr & 0xfff)); | |
| } | |
| value = ep->memp[0xfff]; | |
| ep = tlb_lookup(addr + 1, ucrw); | |
| if (ep != NULL && ep->memp != NULL) { | |
| value += (UINT16)ep->memp[0] << 8; | |
| return value; | |
| } | |
| } | |
| return cpu_linear_memory_read_w(addr, ucrw); | |
| } | } |
| EXCEPTION(GP_EXCEPTION, 0); | EXCEPTION(GP_EXCEPTION, 0); |
| return 0; /* compiler happy */ | return 0; /* compiler happy */ |
| Line 404 cpu_codefetch_w(UINT32 offset) | Line 448 cpu_codefetch_w(UINT32 offset) |
| UINT32 MEMCALL | UINT32 MEMCALL |
| cpu_codefetch_d(UINT32 offset) | cpu_codefetch_d(UINT32 offset) |
| { | { |
| descriptor_t *sd; | descriptor_t *sdp; |
| UINT32 addr; | UINT32 addr; |
| #if defined(IA32_SUPPORT_PREFETCH_QUEUE) | TLB_ENTRY_T *ep[2]; |
| UINT32 v; | UINT32 value; |
| #endif | UINT remain; |
| sd = &CPU_STAT_SREG(CPU_CS_INDEX); | sdp = &CPU_CS_DESC; |
| if (offset <= sd->u.seg.limit - 3) { | if (offset <= sdp->u.seg.limit - 3) { |
| addr = sd->u.seg.segbase + offset; | addr = sdp->u.seg.segbase + offset; |
| #if defined(IA32_SUPPORT_PREFETCH_QUEUE) | |
| if (CPU_PREFETCHQ_REMAIN <= 0) { | |
| cpu_prefetch(addr); | |
| } | |
| if (CPU_PREFETCHQ_REMAIN >= 4) { | |
| return cpu_prefetchq_d(addr); | |
| } else { | |
| switch (CPU_PREFETCHQ_REMAIN) { | |
| case 1: | |
| v = cpu_prefetchq(addr); | |
| addr++; | |
| cpu_prefetch(addr); | |
| v += (UINT32)cpu_prefetchq_3(addr) << 8; | |
| break; | |
| case 2: | |
| v = cpu_prefetchq_w(addr); | |
| addr += 2; | |
| cpu_prefetch(addr); | |
| v += (UINT32)cpu_prefetchq_w(addr) << 16; | |
| break; | |
| case 3: | |
| v = cpu_prefetchq_3(addr); | |
| addr += 3; | |
| cpu_prefetch(addr); | |
| v += (UINT32)cpu_prefetchq(addr) << 24; | |
| break; | |
| } | |
| return v; | |
| } | |
| #else /* !IA32_SUPPORT_PREFETCH_QUEUE */ | |
| if (!CPU_STAT_PAGING) | if (!CPU_STAT_PAGING) |
| return cpu_memoryread_d(addr); | return cpu_memoryread_d(addr); |
| return cpu_lcmemoryread_d(addr); | ep[0] = tlb_lookup(addr, ucrw); |
| #endif /* IA32_SUPPORT_PREFETCH_QUEUE */ | if (ep[0] != NULL && ep[0]->memp != NULL) { |
| remain = 0x1000 - (addr & 0xfff); | |
| if (remain >= 4) { | |
| return LOADINTELDWORD(ep[0]->memp + (addr & 0xfff)); | |
| } | |
| ep[1] = tlb_lookup(addr + remain, ucrw); | |
| if (ep[1] != NULL && ep[1]->memp != NULL) { | |
| switch (remain) { | |
| case 3: | |
| value = ep[0]->memp[0xffd]; | |
| value += (UINT32)LOADINTELWORD(ep[0]->memp + 0xffe) << 8; | |
| value += (UINT32)ep[1]->memp[0] << 24; | |
| break; | |
| case 2: | |
| value = LOADINTELWORD(ep[0]->memp + 0xffe); | |
| value += (UINT32)LOADINTELWORD(ep[1]->memp + 0) << 16; | |
| break; | |
| case 1: | |
| value = ep[0]->memp[0xfff]; | |
| value += (UINT32)LOADINTELWORD(ep[1]->memp + 0) << 8; | |
| value += (UINT32)ep[1]->memp[2] << 24; | |
| break; | |
| default: | |
| ia32_panic("cpu_codefetch_d(): out of range. (remain = %d)\n", remain); | |
| return (UINT32)-1; | |
| } | |
| return value; | |
| } | |
| } | |
| return cpu_linear_memory_read_d(addr, ucrw); | |
| } | } |
| EXCEPTION(GP_EXCEPTION, 0); | EXCEPTION(GP_EXCEPTION, 0); |
| return 0; /* compiler happy */ | return 0; /* compiler happy */ |
| } | } |
| | #undef ucrw |
| | /* |
| | * additional physical address memory access functions |
| | */ |
| | UINT64 MEMCALL |
| | cpu_memoryread_q(UINT32 paddr) |
| | { |
| | UINT64 value; |
| | value = cpu_memoryread_d(paddr); |
| | value += (UINT64)cpu_memoryread_d(paddr + 4) << 32; |
| | return value; |
| | } |
| | void MEMCALL |
| | cpu_memorywrite_q(UINT32 paddr, UINT64 value) |
| | { |
| | cpu_memorywrite_d(paddr, (UINT32)value); |
| | cpu_memorywrite_d(paddr + 4, (UINT32)(value >> 32)); |
| | } |
| | REG80 MEMCALL |
| | cpu_memoryread_f(UINT32 paddr) |
| | { |
| | REG80 value; |
| | int i; |
| | for (i = 0; i < (int)sizeof(REG80); ++i) { |
| | value.b[i] = cpu_memoryread(paddr + i); |
| | } |
| | return value; |
| | } |
| | void MEMCALL |
| | cpu_memorywrite_f(UINT32 paddr, const REG80 *value) |
| | { |
| | int i; |
| | for (i = 0; i < (int)sizeof(REG80); ++i) { |
| | cpu_memorywrite(paddr + i, value->b[i]); |
| | } |
| | } |
| /* | /* |
| * virtual address memory access functions | * virtual address memory access functions |
| Line 463 cpu_codefetch_d(UINT32 offset) | Line 552 cpu_codefetch_d(UINT32 offset) |
| VIRTUAL_ADDRESS_MEMORY_ACCESS_FUNCTION(b, UINT8, 1) | VIRTUAL_ADDRESS_MEMORY_ACCESS_FUNCTION(b, UINT8, 1) |
| VIRTUAL_ADDRESS_MEMORY_ACCESS_FUNCTION(w, UINT16, 2) | VIRTUAL_ADDRESS_MEMORY_ACCESS_FUNCTION(w, UINT16, 2) |
| VIRTUAL_ADDRESS_MEMORY_ACCESS_FUNCTION(d, UINT32, 4) | VIRTUAL_ADDRESS_MEMORY_ACCESS_FUNCTION(d, UINT32, 4) |
| UINT64 MEMCALL | |
| cpu_vmemoryread_q(int idx, UINT32 offset) | |
| { | |
| descriptor_t *sdp; | |
| UINT32 addr; | |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | |
| sdp = &CPU_STAT_SREG(idx); | |
| if (!SEG_IS_VALID(sdp)) { | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| if (!(sdp->flag & CPU_DESC_FLAG_READABLE)) { | |
| cpu_memoryread_check(sdp, offset, 8, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | |
| } else if (!(sdp->flag & CPU_DESC_FLAG_WHOLEADR)) { | |
| if (!check_limit_upstairs(sdp, offset, 8)) | |
| goto range_failure; | |
| } | |
| addr = sdp->u.seg.segbase + offset; | |
| check_memory_break_point(addr, 8, CPU_DR7_RW_RO); | |
| if (!CPU_STAT_PAGING) | |
| return cpu_memoryread_q(addr); | |
| return cpu_linear_memory_read_q(addr, CPU_PAGE_READ_DATA | CPU_STAT_USER_MODE); | |
| range_failure: | |
| VERBOSE(("cpu_vmemoryread_q: type = %d, offset = %08x, limit = %08x", sdp->type, offset, sdp->u.seg.limit)); | |
| exc = (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION; | |
| err: | |
| EXCEPTION(exc, 0); | |
| return 0; /* compiler happy */ | |
| } | |
| void MEMCALL | |
| cpu_vmemorywrite_q(int idx, UINT32 offset, UINT64 value) | |
| { | |
| descriptor_t *sdp; | |
| UINT32 addr; | |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | |
| sdp = &CPU_STAT_SREG(idx); | |
| if (!SEG_IS_VALID(sdp)) { | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| if (!(sdp->flag & CPU_DESC_FLAG_WRITABLE)) { | |
| cpu_memorywrite_check(sdp, offset, 8, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | |
| } else if (!(sdp->flag & CPU_DESC_FLAG_WHOLEADR)) { | |
| if (!check_limit_upstairs(sdp, offset, 8)) | |
| goto range_failure; | |
| } | |
| addr = sdp->u.seg.segbase + offset; | |
| check_memory_break_point(addr, 8, CPU_DR7_RW_RW); | |
| if (!CPU_STAT_PAGING) { | |
| cpu_memorywrite_q(addr, value); | |
| } else { | |
| cpu_linear_memory_write_q(addr, value, CPU_PAGE_READ_DATA | CPU_STAT_USER_MODE); | |
| } | |
| return; | |
| range_failure: | |
| VERBOSE(("cpu_vmemorywrite_q: type = %d, offset = %08x, limit = %08x", sdp->type, offset, sdp->u.seg.limit)); | |
| exc = (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION; | |
| err: | |
| EXCEPTION(exc, 0); | |
| } | |
| REG80 MEMCALL | |
| cpu_vmemoryread_f(int idx, UINT32 offset) | |
| { | |
| descriptor_t *sdp; | |
| UINT32 addr; | |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | |
| sdp = &CPU_STAT_SREG(idx); | |
| if (!SEG_IS_VALID(sdp)) { | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| if (!(sdp->flag & CPU_DESC_FLAG_READABLE)) { | |
| cpu_memoryread_check(sdp, offset, 10, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | |
| } else if (!(sdp->flag & CPU_DESC_FLAG_WHOLEADR)) { | |
| if (!check_limit_upstairs(sdp, offset, 10)) | |
| goto range_failure; | |
| } | |
| addr = sdp->u.seg.segbase + offset; | |
| check_memory_break_point(addr, 10, CPU_DR7_RW_RO); | |
| if (!CPU_STAT_PAGING) | |
| return cpu_memoryread_f(addr); | |
| return cpu_linear_memory_read_f(addr, CPU_PAGE_READ_DATA | CPU_PAGE_READ_DATA | CPU_STAT_USER_MODE); | |
| range_failure: | |
| VERBOSE(("cpu_vmemoryread_f: type = %d, offset = %08x, limit = %08x", sdp->type, offset, sdp->u.seg.limit)); | |
| exc = (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION; | |
| err: | |
| EXCEPTION(exc, 0); | |
| { | |
| REG80 dummy; | |
| memset(&dummy, 0, sizeof(dummy)); | |
| return dummy; /* compiler happy */ | |
| } | |
| } | |
| void MEMCALL | |
| cpu_vmemorywrite_f(int idx, UINT32 offset, const REG80 *value) | |
| { | |
| descriptor_t *sdp; | |
| UINT32 addr; | |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | |
| sdp = &CPU_STAT_SREG(idx); | |
| if (!SEG_IS_VALID(sdp)) { | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| if (!(sdp->flag & CPU_DESC_FLAG_WRITABLE)) { | |
| cpu_memorywrite_check(sdp, offset, 10, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | |
| } else if (!(sdp->flag & CPU_DESC_FLAG_WHOLEADR)) { | |
| if (!check_limit_upstairs(sdp, offset, 10)) | |
| goto range_failure; | |
| } | |
| addr = sdp->u.seg.segbase + offset; | |
| check_memory_break_point(addr, 10, CPU_DR7_RW_RW); | |
| if (!CPU_STAT_PAGING) { | |
| cpu_memorywrite_f(addr, value); | |
| } else { | |
| cpu_linear_memory_write_f(addr, value, CPU_PAGE_WRITE_DATA | CPU_STAT_USER_MODE); | |
| } | |
| return; | |
| range_failure: | |
| VERBOSE(("cpu_vmemorywrite_f: type = %d, offset = %08x, limit = %08x", sdp->type, offset, sdp->u.seg.limit)); | |
| exc = (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION; | |
| err: | |
| EXCEPTION(exc, 0); | |
| } |
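
The 1.27 column above hinges on two ideas that are easy to miss in diff form. First, `check_limit_upstairs()` folds the old per-descriptor-type limit tests into one range check on `[offset, offset + len - 1]`: expand-up segments are valid in `[0, limit]`, expand-down segments are valid strictly above `limit` up to 64 KB or 4 GB depending on the segment's B/D bit, and wrap-around of the end address is rejected. The following self-contained sketch mirrors that arithmetic; the `struct seg` layout and the `limit_check()` name are illustrative stand-ins, not np2 identifiers:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative model only: these names are not taken from the np2 sources. */
struct seg {
	uint32_t limit;       /* segment limit from the descriptor */
	int      is_32bit;    /* B/D bit: 1 -> 4 GB space, 0 -> 64 KB space */
	int      expand_down; /* expand-down data segment? */
};

/* Return 1 when [offset, offset + len - 1] lies inside the segment, else 0. */
static int limit_check(const struct seg *s, uint32_t offset, uint32_t len)
{
	uint32_t space = s->is_32bit ? 0xffffffffu : 0x0000ffffu;
	uint32_t last  = offset + len - 1;	/* inclusive end of the access */

	if (len == 0 || len - 1 > space)
		return 0;
	if (s->expand_down) {
		/* valid offsets are (limit, space]; last >= offset rejects wrap */
		return last >= offset && offset > s->limit && last <= space;
	}
	/* expand-up: valid offsets are [0, limit] */
	return last >= offset && last <= s->limit;
}

int main(void)
{
	struct seg code16 = { 0xffff, 0, 0 };	/* 16-bit expand-up */
	struct seg stack  = { 0x0fff, 1, 1 };	/* 32-bit expand-down */

	printf("%d\n", limit_check(&code16, 0xfffe, 2));	/* 1: ends exactly at the limit */
	printf("%d\n", limit_check(&code16, 0xffff, 2));	/* 0: crosses the limit */
	printf("%d\n", limit_check(&stack,  0x2000, 4));	/* 1: entirely above the limit */
	printf("%d\n", limit_check(&stack,  0x0ffe, 4));	/* 0: dips below the limit */
	return 0;
}
```

Second, the rewritten `cpu_codefetch_w()`/`cpu_codefetch_d()` first ask `tlb_lookup()` for a host pointer to the page holding the fetch address; only when the operand straddles a 4 KB page boundary do they assemble the value from two pages, and only on a TLB miss do they fall back to the generic `cpu_linear_memory_read_*()` path. Below is a toy version of the straddle handling against a hypothetical flat page array rather than a real TLB; all names here are assumptions made for the sketch:

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1u << PAGE_SHIFT)

static uint8_t ram[16 * PAGE_SIZE];	/* stand-in for TLB-mapped host pages */

/* Host pointer to the start of the page containing addr (always hits here). */
static uint8_t *page_base(uint32_t addr)
{
	return &ram[(addr & ~(PAGE_SIZE - 1u)) % sizeof(ram)];
}

/* Little-endian 16-bit fetch that may straddle a 4 KB page boundary. */
static uint16_t fetch_le16(uint32_t addr)
{
	uint32_t off = addr & (PAGE_SIZE - 1u);
	uint8_t *p = page_base(addr);

	if (off != PAGE_SIZE - 1u) {
		/* both bytes live on the same page: one lookup is enough */
		return (uint16_t)(p[off] | ((uint16_t)p[off + 1] << 8));
	}
	/* low byte on this page, high byte on the next page */
	return (uint16_t)(p[off] | ((uint16_t)page_base(addr + 1)[0] << 8));
}

int main(void)
{
	ram[0x0fff] = 0x34;	/* last byte of page 0 */
	ram[0x1000] = 0x12;	/* first byte of page 1 */
	printf("0x%04x\n", fetch_le16(0x0fff));	/* prints 0x1234 */
	return 0;
}
```

The dword case in 1.27 extends the same idea to the three possible split positions (1, 2 or 3 bytes left on the first page), which is what the `switch (remain)` in the new `cpu_codefetch_d()` handles.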