| version 1.5, 2004/01/13 16:32:16 | version 1.11, 2004/02/20 16:09:04 |
|---|---|
| Line 31 | Line 31 |
| #include "cpu.h" | #include "cpu.h" |
| #include "memory.h" | #include "memory.h" |
| BYTE *cpumem = 0; | |
| DWORD extmem_size = 0; | |
| BYTE protectmem_size = 0; | |
| /* | |
| * initialize 1MB-16MB memory | |
| */ | |
| int | |
| init_cpumem(BYTE usemem) | |
| { | |
| DWORD size; | |
| if (usemem > 13) | |
| usemem = 13; | |
| size = usemem << 20; | |
| if (extmem_size != size - (LOWMEM - 0x100000)) { | |
| if (cpumem) { | |
| free(cpumem); | |
| cpumem = 0; | |
| } | |
| if (size <= LOWMEM - 0x100000) { | |
| extmem_size = 0; | |
| cpumem = 0; | |
| } else { | |
| extmem_size = size - (LOWMEM - 0x100000); | |
| cpumem = (BYTE *)malloc(extmem_size); | |
| if (cpumem == NULL) { | |
| protectmem_size = 0; | |
| return FAILURE; | |
| } | |
| memset(cpumem, 0, extmem_size); | |
| } | |
| } | |
| protectmem_size = usemem; | |
| return SUCCESS; | |
| } | |
| /* | /* |
| * memory access check | * memory access check |
| */ | */ |
| void | void |
| cpu_memoryread_check(descriptor_t* sd, DWORD madr, DWORD length, int e) | cpu_memoryread_check(descriptor_t *sd, UINT32 offset, UINT length, int e) |
| { | { |
| UINT32 uplimit; | |
| if (CPU_STAT_PM) { | if (CPU_STAT_PM) { |
| /* invalid */ | /* invalid */ |
| if (!sd->valid) { | if (!sd->valid) { |
| VERBOSE(("cpu_memoryread_check: invalid")); | |
| EXCEPTION(GP_EXCEPTION, 0); | EXCEPTION(GP_EXCEPTION, 0); |
| } | } |
| /* not present */ | /* not present */ |
| if (!sd->p) { | if (!sd->p) { |
| VERBOSE(("cpu_memoryread_check: not present")); | |
| EXCEPTION(e, 0); | EXCEPTION(e, 0); |
| } | } |
| } | |
| switch (sd->type) { | switch (sd->type) { |
| case 0: case 1: /* ro */ | case 0: case 1: /* ro */ |
| case 2: case 3: /* rw */ | case 2: case 3: /* rw */ |
| case 10: case 11: /* rx */ | case 10: case 11: /* rx */ |
| case 14: case 15: /* rxc */ | case 14: case 15: /* rxc */ |
| if ((madr > sd->u.seg.segend - length + 1) | if (offset > sd->u.seg.limit - length + 1) { |
| || (length - 1 > sd->u.seg.limit)) { | VERBOSE(("cpu_memoryread_check: offset(%08x) > sd->u.seg.limit(%08x) - length(%08x) + 1", offset, sd->u.seg.limit, length)); |
| EXCEPTION(e, 0); | EXCEPTION(e, 0); |
| } | |
| break; | |
| case 4: case 5: /* ro (expand down) */ | |
| case 6: case 7: /* rw (expand down) */ | |
| { | |
| DWORD uplimit = sd->d ? 0xffffffff : 0x0000ffff; | |
| if ((madr <= sd->u.seg.segend) | |
| || (madr > uplimit) | |
| || (uplimit - madr < length - 1)) { | |
| EXCEPTION(e, 0); | |
| } | |
| } | } |
| break; | if (length - 1 > sd->u.seg.limit) { |
| VERBOSE(("cpu_memoryread_check: length(%08x) - 1 > sd->u.seg.limit(%08x)", length, sd->u.seg.limit)); | |
| EXCEPTION(e, 0); | |
| } | |
| break; | |
| default: | case 4: case 5: /* ro (expand down) */ |
| case 6: case 7: /* rw (expand down) */ | |
| uplimit = sd->d ? 0xffffffff : 0x0000ffff; | |
| if (offset <= sd->u.seg.limit) { | |
| VERBOSE(("cpu_memoryread_check: offset(%08x) <= sd->u.seg.limit(%08x)", offset, sd->u.seg.limit)); | |
| EXCEPTION(e, 0); | |
| } | |
| if (offset > uplimit) { | |
| VERBOSE(("cpu_memoryread_check: offset(%08x) > uplimit(%08x)", offset, uplimit)); | |
| EXCEPTION(e, 0); | EXCEPTION(e, 0); |
| break; | |
| } | } |
| if (uplimit - offset < length - 1) { | |
| VERBOSE(("cpu_memoryread_check: uplimit(%08x) - offset(%08x) < length(%08x) - 1", uplimit, offset, length)); | |
| EXCEPTION(e, 0); | |
| } | |
| break; | |
| default: | |
| VERBOSE(("cpu_memoryread_check: invalid type (type = %d)", sd->type)); | |
| EXCEPTION(e, 0); | |
| break; | |
| } | } |
| sd->flag |= CPU_DESC_READABLE; | sd->flag |= CPU_DESC_FLAG_READABLE; |
| } | } |
| void | void |
| cpu_memorywrite_check(descriptor_t* sd, DWORD madr, DWORD length, int e) | cpu_memorywrite_check(descriptor_t *sd, UINT32 offset, UINT length, int e) |
| { | { |
| UINT32 uplimit; | |
| if (CPU_STAT_PM) { | if (CPU_STAT_PM) { |
| /* invalid */ | /* invalid */ |
| if (!sd->valid) { | if (!sd->valid) { |
| VERBOSE(("cpu_memorywrite_check: invalid")); | |
| EXCEPTION(GP_EXCEPTION, 0); | EXCEPTION(GP_EXCEPTION, 0); |
| } | } |
| /* not present */ | /* not present */ |
| if (!sd->p) { | if (!sd->p) { |
| VERBOSE(("cpu_memorywrite_check: not present")); | |
| EXCEPTION(e, 0); | EXCEPTION(e, 0); |
| } | } |
| switch (sd->type) { | if (!sd->s) { |
| case 2: case 3: /* rw */ | VERBOSE(("cpu_memorywrite_check: system segment")); |
| if ((madr > sd->u.seg.segend - length + 1) | EXCEPTION(e, 0); |
| || (length - 1 > sd->u.seg.limit)) { | } |
| EXCEPTION(e, 0); | } |
| } | |
| break; | |
| case 6: case 7: /* rw (expand down) */ | switch (sd->type) { |
| { | case 2: case 3: /* rw */ |
| DWORD uplimit = sd->d ? 0xffffffff : 0x0000ffff; | if (offset > sd->u.seg.limit - length + 1) { |
| if ((madr <= sd->u.seg.segend) | VERBOSE(("cpu_memorywrite_check: offset(%08x) > sd->u.seg.limit(%08x) - length(%08x) + 1", offset, sd->u.seg.limit, length)); |
| || (madr > uplimit) | EXCEPTION(e, 0); |
| || (uplimit - madr < length - 1)) { | |
| EXCEPTION(e, 0); | |
| } | |
| } | } |
| break; | if (length - 1 > sd->u.seg.limit) { |
| VERBOSE(("cpu_memorywrite_check: length(%08x) - 1 > sd->u.seg.limit(%08x)", length, sd->u.seg.limit)); | |
| EXCEPTION(e, 0); | |
| } | |
| break; | |
| default: | case 6: case 7: /* rw (expand down) */ |
| uplimit = sd->d ? 0xffffffff : 0x0000ffff; | |
| if (offset <= sd->u.seg.limit) { | |
| VERBOSE(("cpu_memorywrite_check: offset(%08x) <= sd->u.seg.limit(%08x)", offset, sd->u.seg.limit)); | |
| EXCEPTION(e, 0); | |
| } | |
| if (offset > uplimit) { | |
| VERBOSE(("cpu_memorywrite_check: offset(%08x) > uplimit(%08x)", offset, uplimit)); | |
| EXCEPTION(e, 0); | EXCEPTION(e, 0); |
| break; | |
| } | } |
| if (uplimit - offset < length - 1) { | |
| VERBOSE(("cpu_memorywrite_check: uplimit(%08x) - offset(%08x) < length(%08x) - 1", uplimit, offset, length)); | |
| EXCEPTION(e, 0); | |
| } | |
| break; | |
| default: | |
| VERBOSE(("cpu_memorywrite_check: invalid type (type = %d)", sd->type)); | |
| EXCEPTION(e, 0); | |
| break; | |
| } | } |
| sd->flag |= CPU_DESC_WRITABLE; | sd->flag |= CPU_DESC_FLAG_WRITABLE; |
| } | } |
| BOOL | BOOL |
| cpu_stack_push_check(descriptor_t* sdp, DWORD esp, DWORD length) | cpu_stack_push_check(descriptor_t *sd, UINT32 esp, UINT length) |
| { | { |
| UINT32 limit; | |
| if (!CPU_STAT_PM) | if (CPU_STAT_PM) { |
| return TRUE; | if (!sd->valid || !sd->p) |
| if (!sdp->valid || !sdp->p) | |
| return FALSE; | |
| #ifdef _DEBUG | |
| if (!sdp->s || sdp->u.seg.c || !sdp->u.seg.wr) | |
| return FALSE; | |
| #endif | |
| if (!sdp->d) | |
| esp &= 0xffff; | |
| if (sdp->u.seg.ec) { | |
| DWORD limit = (sdp->d) ? 0xffffffff : 0xffff; | |
| if ((esp == 0) | |
| || (esp < length) | |
| || (esp - length <= sdp->u.seg.segend) | |
| || (esp > limit)) | |
| return FALSE; | return FALSE; |
| } else { | if (!sd->s || sd->u.seg.c || !sd->u.seg.wr) |
| /* expand-up stack */ | return FALSE; |
| if (esp == 0) { | |
| if ((sdp->d && (sdp->u.seg.segend != 0xffffffff)) | if (!sd->d) { |
| || (!sdp->d && (sdp->u.seg.segend != 0xffff))) | esp &= 0xffff; |
| return FALSE; | limit = 0xffff; |
| } else { | } else { |
| if ((esp < length) | limit = 0xffffffff; |
| || (esp - 1 > sdp->u.seg.segend)) | } |
| if (sd->u.seg.ec) { | |
| /* expand-down stack */ | |
| if ((esp == 0) | |
| || (esp < length) | |
| || (esp - length <= sd->u.seg.limit) | |
| || (esp > limit)) | |
| return FALSE; | return FALSE; |
| } else { | |
| /* expand-up stack */ | |
| if (esp == 0) { | |
| if ((sd->d && (sd->u.seg.segend != 0xffffffff)) | |
| || (!sd->d && (sd->u.seg.segend != 0xffff))) | |
| return FALSE; | |
| } else { | |
| if ((esp < length) | |
| || (esp - 1 > sd->u.seg.limit)) | |
| return FALSE; | |
| } | |
| } | } |
| } | } |
| return TRUE; | return TRUE; |
| } | } |
| BOOL | BOOL |
| cpu_stack_pop_check(descriptor_t* sdp, DWORD esp, DWORD length) | cpu_stack_pop_check(descriptor_t *sd, UINT32 esp, UINT length) |
| { | { |
| DWORD limit; | UINT32 limit; |
| if (!CPU_STAT_PM) | |
| return TRUE; | |
| if (!sdp->valid || !sdp->p) | if (CPU_STAT_PM) { |
| return FALSE; | if (!sd->valid || !sd->p) |
| #ifdef _DEBUG | |
| if (!sdp->s || sdp->u.seg.c || !sdp->u.seg.wr) | |
| return FALSE; | |
| #endif | |
| if (!sdp->d) { | |
| esp &= 0xffff; | |
| limit = 0xffff; | |
| } else { | |
| limit = 0xffffffff; | |
| } | |
| if (sdp->u.seg.ec) { | |
| if ((esp == limit) | |
| || ((limit - esp) + 1 < length)) | |
| return FALSE; | return FALSE; |
| } else { | if (!sd->s || sd->u.seg.c || !sd->u.seg.wr) |
| /* expand-up stack */ | |
| if ((esp == limit) | |
| || (sdp->u.seg.segend == 0) | |
| || (esp > sdp->u.seg.segend) | |
| || ((sdp->u.seg.segend - esp) + 1 < length)) | |
| return FALSE; | return FALSE; |
| if (!sd->d) { | |
| esp &= 0xffff; | |
| limit = 0xffff; | |
| } else { | |
| limit = 0xffffffff; | |
| } | |
| if (sd->u.seg.ec) { | |
| /* expand-down stack */ | |
| if ((esp == limit) | |
| || ((limit - esp) + 1 < length)) | |
| return FALSE; | |
| } else { | |
| /* expand-up stack */ | |
| if ((esp == limit) | |
| || (sd->u.seg.segend == 0) | |
| || (esp > sd->u.seg.limit) | |
| || ((sd->u.seg.limit - esp) + 1 < length)) | |
| return FALSE; | |
| } | |
| } | } |
| return TRUE; | return TRUE; |
| } | } |
| Line 241 cpu_stack_pop_check(descriptor_t* sdp, D | Line 232 cpu_stack_pop_check(descriptor_t* sdp, D |
| /* | /* |
| * code fetch | * code fetch |
| */ | */ |
| BYTE MEMCALL | UINT8 MEMCALL |
| cpu_codefetch(DWORD madr) | cpu_codefetch(UINT32 offset) |
| { | { |
| descriptor_t *sd; | descriptor_t *sd; |
| DWORD addr; | UINT32 addr; |
| sd = &CPU_STAT_SREG(CPU_CS_INDEX); | sd = &CPU_STAT_SREG(CPU_CS_INDEX); |
| if (!CPU_INST_AS32) | if (offset <= sd->u.seg.limit) { |
| madr &= 0xffff; | addr = CPU_STAT_SREGBASE(CPU_CS_INDEX) + offset; |
| if (madr <= sd->u.seg.segend) { | |
| addr = CPU_STAT_SREGBASE(CPU_CS_INDEX) + madr; | |
| if (!CPU_STAT_PM) | if (!CPU_STAT_PM) |
| return cpu_memoryread(addr); | return cpu_memoryread(addr); |
| return cpu_lcmemoryread(addr); | return cpu_lcmemoryread(addr); |
| Line 260 cpu_codefetch(DWORD madr) | Line 249 cpu_codefetch(DWORD madr) |
| return 0; /* compiler happy */ | return 0; /* compiler happy */ |
| } | } |
| WORD MEMCALL | UINT16 MEMCALL |
| cpu_codefetch_w(DWORD madr) | cpu_codefetch_w(UINT32 offset) |
| { | { |
| descriptor_t *sd; | descriptor_t *sd; |
| DWORD addr; | UINT32 addr; |
| sd = &CPU_STAT_SREG(CPU_CS_INDEX); | sd = &CPU_STAT_SREG(CPU_CS_INDEX); |
| if (!CPU_INST_AS32) | if (offset <= sd->u.seg.limit - 1) { |
| madr &= 0xffff; | addr = CPU_STAT_SREGBASE(CPU_CS_INDEX) + offset; |
| if (madr <= sd->u.seg.segend - 1) { | |
| addr = CPU_STAT_SREGBASE(CPU_CS_INDEX) + madr; | |
| if (!CPU_STAT_PM) | if (!CPU_STAT_PM) |
| return cpu_memoryread_w(addr); | return cpu_memoryread_w(addr); |
| return cpu_lcmemoryread_w(addr); | return cpu_lcmemoryread_w(addr); |
| Line 279 cpu_codefetch_w(DWORD madr) | Line 266 cpu_codefetch_w(DWORD madr) |
| return 0; /* compiler happy */ | return 0; /* compiler happy */ |
| } | } |
| DWORD MEMCALL | UINT32 MEMCALL |
| cpu_codefetch_d(DWORD madr) | cpu_codefetch_d(UINT32 offset) |
| { | { |
| descriptor_t *sd; | descriptor_t *sd; |
| DWORD addr; | UINT32 addr; |
| sd = &CPU_STAT_SREG(CPU_CS_INDEX); | sd = &CPU_STAT_SREG(CPU_CS_INDEX); |
| if (!CPU_INST_AS32) | if (offset <= sd->u.seg.limit - 3) { |
| madr &= 0xffff; | addr = CPU_STAT_SREGBASE(CPU_CS_INDEX) + offset; |
| if (madr <= sd->u.seg.segend - 3) { | |
| addr = CPU_STAT_SREGBASE(CPU_CS_INDEX) + madr; | |
| if (!CPU_STAT_PM) | if (!CPU_STAT_PM) |
| return cpu_memoryread_d(addr); | return cpu_memoryread_d(addr); |
| return cpu_lcmemoryread_d(addr); | return cpu_lcmemoryread_d(addr); |
| Line 302 cpu_codefetch_d(DWORD madr) | Line 287 cpu_codefetch_d(DWORD madr) |
| /* | /* |
| * virtual address -> linear address | * virtual address -> linear address |
| */ | */ |
| BYTE MEMCALL | UINT8 MEMCALL |
| cpu_vmemoryread(int idx, DWORD madr) | cpu_vmemoryread(int idx, UINT32 offset) |
| { | { |
| descriptor_t *sd; | descriptor_t *sd; |
| DWORD addr; | UINT32 addr; |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); |
| sd = &CPU_STAT_SREG(idx); | sd = &CPU_STAT_SREG(idx); |
| if (!sd->valid) { | if (!sd->valid) { |
| EXCEPTION(GP_EXCEPTION, 0); | exc = GP_EXCEPTION; |
| goto err; | |
| } | } |
| if (!CPU_INST_AS32) | if (!(sd->flag & CPU_DESC_FLAG_READABLE)) { |
| madr &= 0xffff; | cpu_memoryread_check(sd, offset, 1, |
| for (;;) { | |
| if ((sd->flag & CPU_DESC_READABLE) | |
| || (madr <= sd->u.seg.segend)) { | |
| addr = CPU_STAT_SREGBASE(idx) + madr; | |
| if (!CPU_STAT_PM) | |
| return cpu_memoryread(addr); | |
| return cpu_lmemoryread(addr); | |
| } | |
| cpu_memoryread_check(sd, madr, 1, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); |
| } else { | |
| switch (sd->type) { | |
| case 4: case 5: case 6: case 7: | |
| if (offset <= sd->u.seg.limit) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| default: | |
| if (offset > sd->u.seg.limit) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| } | |
| } | } |
| /*NOTREACHED*/ | addr = CPU_STAT_SREGBASE(idx) + offset; |
| if (!CPU_STAT_PM) | |
| return cpu_memoryread(addr); | |
| return cpu_lmemoryread(addr, CPU_STAT_USER_MODE); | |
| err: | |
| EXCEPTION(exc, 0); | |
| return 0; /* compiler happy */ | |
| } | } |
| WORD MEMCALL | UINT16 MEMCALL |
| cpu_vmemoryread_w(int idx, DWORD madr) | cpu_vmemoryread_w(int idx, UINT32 offset) |
| { | { |
| descriptor_t *sd; | descriptor_t *sd; |
| DWORD addr; | UINT32 addr; |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); |
| sd = &CPU_STAT_SREG(idx); | sd = &CPU_STAT_SREG(idx); |
| if (!sd->valid) { | if (!sd->valid) { |
| EXCEPTION(GP_EXCEPTION, 0); | exc = GP_EXCEPTION; |
| goto err; | |
| } | } |
| if (!CPU_INST_AS32) | if (!(sd->flag & CPU_DESC_FLAG_READABLE)) { |
| madr &= 0xffff; | cpu_memoryread_check(sd, offset, 2, |
| for (;;) { | |
| if ((sd->flag & CPU_DESC_READABLE) | |
| || (madr <= sd->u.seg.segend - 1)) { | |
| addr = CPU_STAT_SREGBASE(idx) + madr; | |
| if (!CPU_STAT_PM) | |
| return cpu_memoryread_w(addr); | |
| return cpu_lmemoryread_w(addr); | |
| } | |
| cpu_memoryread_check(sd, madr, 2, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); |
| } | } else { |
| /*NOTREACHED*/ | switch (sd->type) { |
| case 4: case 5: case 6: case 7: | |
| if (offset - 1 <= sd->u.seg.limit) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| default: | |
| if (offset > sd->u.seg.limit - 1) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| } | |
| } | |
| addr = CPU_STAT_SREGBASE(idx) + offset; | |
| if (!CPU_STAT_PM) | |
| return cpu_memoryread_w(addr); | |
| return cpu_lmemoryread_w(addr, CPU_STAT_USER_MODE); | |
| err: | |
| EXCEPTION(exc, 0); | |
| return 0; /* compiler happy */ | |
| } | } |
| DWORD MEMCALL | UINT32 MEMCALL |
| cpu_vmemoryread_d(int idx, DWORD madr) | cpu_vmemoryread_d(int idx, UINT32 offset) |
| { | { |
| descriptor_t *sd; | descriptor_t *sd; |
| DWORD addr; | UINT32 addr; |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); |
| sd = &CPU_STAT_SREG(idx); | sd = &CPU_STAT_SREG(idx); |
| if (!sd->valid) { | if (!sd->valid) { |
| EXCEPTION(GP_EXCEPTION, 0); | exc = GP_EXCEPTION; |
| goto err; | |
| } | } |
| if (!CPU_INST_AS32) | if (!(sd->flag & CPU_DESC_FLAG_READABLE)) { |
| madr &= 0xffff; | cpu_memoryread_check(sd, offset, 4, |
| for (;;) { | |
| if ((sd->flag & CPU_DESC_READABLE) | |
| || (madr <= sd->u.seg.segend - 3)) { | |
| addr = CPU_STAT_SREGBASE(idx) + madr; | |
| if (!CPU_STAT_PM) | |
| return cpu_memoryread_d(addr); | |
| return cpu_lmemoryread_d(addr); | |
| } | |
| cpu_memoryread_check(sd, madr, 4, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); |
| } else { | |
| switch (sd->type) { | |
| case 4: case 5: case 6: case 7: | |
| if (offset - 3 <= sd->u.seg.limit) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| default: | |
| if (offset > sd->u.seg.limit - 3) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| } | |
| } | } |
| /*NOTREACHED*/ | addr = CPU_STAT_SREGBASE(idx) + offset; |
| if (!CPU_STAT_PM) | |
| return cpu_memoryread_d(addr); | |
| return cpu_lmemoryread_d(addr, CPU_STAT_USER_MODE); | |
| err: | |
| EXCEPTION(exc, 0); | |
| return 0; /* compiler happy */ | |
| } | } |
| /* vaddr memory write */ | /* vaddr memory write */ |
| void MEMCALL | void MEMCALL |
| cpu_vmemorywrite(int idx, DWORD madr, BYTE val) | cpu_vmemorywrite(int idx, UINT32 offset, UINT8 val) |
| { | { |
| descriptor_t *sd; | descriptor_t *sd; |
| DWORD addr; | UINT32 addr; |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); |
| sd = &CPU_STAT_SREG(idx); | sd = &CPU_STAT_SREG(idx); |
| if (!sd->valid) { | if (!sd->valid) { |
| EXCEPTION(GP_EXCEPTION, 0); | exc = GP_EXCEPTION; |
| goto err; | |
| } | } |
| if (!CPU_INST_AS32) | if (!(sd->flag & CPU_DESC_FLAG_WRITABLE)) { |
| madr &= 0xffff; | cpu_memorywrite_check(sd, offset, 1, |
| for (;;) { | (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); |
| if ((sd->flag & CPU_DESC_WRITABLE) | } else { |
| || (madr <= sd->u.seg.segend)) { | switch (sd->type) { |
| addr = CPU_STAT_SREGBASE(idx) + madr; | case 6: case 7: |
| if (!CPU_STAT_PM) { | if (offset <= sd->u.seg.limit) { |
| /* real mode */ | if (idx == CPU_SS_INDEX) |
| cpu_memorywrite(addr, val); | exc = SS_EXCEPTION; |
| } else { | else |
| /* protected mode */ | exc = GP_EXCEPTION; |
| cpu_lmemorywrite(addr, val); | goto err; |
| } | |
| break; | |
| default: | |
| if (offset > sd->u.seg.limit) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | } |
| return; | break; |
| } | } |
| cpu_memorywrite_check(sd, madr, 1, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | |
| } | } |
| /*NOTREACHED*/ | addr = CPU_STAT_SREGBASE(idx) + offset; |
| if (!CPU_STAT_PM) { | |
| /* real mode */ | |
| cpu_memorywrite(addr, val); | |
| } else { | |
| /* protected mode */ | |
| cpu_lmemorywrite(addr, val, CPU_STAT_USER_MODE); | |
| } | |
| return; | |
| err: | |
| EXCEPTION(exc, 0); | |
| } | } |
| void MEMCALL | void MEMCALL |
| cpu_vmemorywrite_w(int idx, DWORD madr, WORD val) | cpu_vmemorywrite_w(int idx, UINT32 offset, UINT16 val) |
| { | { |
| descriptor_t *sd; | descriptor_t *sd; |
| DWORD addr; | UINT32 addr; |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); |
| sd = &CPU_STAT_SREG(idx); | sd = &CPU_STAT_SREG(idx); |
| if (!sd->valid) { | if (!sd->valid) { |
| EXCEPTION(GP_EXCEPTION, 0); | exc = GP_EXCEPTION; |
| goto err; | |
| } | } |
| if (!CPU_INST_AS32) | if (!(sd->flag & CPU_DESC_FLAG_WRITABLE)) { |
| madr &= 0xffff; | cpu_memorywrite_check(sd, offset, 2, |
| for (;;) { | (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); |
| if ((sd->flag & CPU_DESC_WRITABLE) | } else { |
| || (madr <= sd->u.seg.segend - 1)) { | switch (sd->type) { |
| addr = CPU_STAT_SREGBASE(idx) + madr; | case 6: case 7: |
| if (!CPU_STAT_PM) { | if (offset - 1 <= sd->u.seg.limit) { |
| /* real mode */ | if (idx == CPU_SS_INDEX) |
| cpu_memorywrite_w(addr, val); | exc = SS_EXCEPTION; |
| } else { | else |
| /* protected mode */ | exc = GP_EXCEPTION; |
| cpu_lmemorywrite_w(addr, val); | goto err; |
| } | |
| break; | |
| default: | |
| if (offset > sd->u.seg.limit - 1) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | } |
| return; | break; |
| } | } |
| cpu_memorywrite_check(sd, madr, 2, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | |
| } | } |
| /*NOTREACHED*/ | addr = CPU_STAT_SREGBASE(idx) + offset; |
| if (!CPU_STAT_PM) { | |
| /* real mode */ | |
| cpu_memorywrite_w(addr, val); | |
| } else { | |
| /* protected mode */ | |
| cpu_lmemorywrite_w(addr, val, CPU_STAT_USER_MODE); | |
| } | |
| return; | |
| err: | |
| EXCEPTION(exc, 0); | |
| } | } |
| void MEMCALL | void MEMCALL |
| cpu_vmemorywrite_d(int idx, DWORD madr, DWORD val) | cpu_vmemorywrite_d(int idx, UINT32 offset, UINT32 val) |
| { | { |
| descriptor_t *sd; | descriptor_t *sd; |
| DWORD addr; | UINT32 addr; |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); |
| sd = &CPU_STAT_SREG(idx); | sd = &CPU_STAT_SREG(idx); |
| if (!sd->valid) { | if (!sd->valid) { |
| EXCEPTION(GP_EXCEPTION, 0); | exc = GP_EXCEPTION; |
| goto err; | |
| } | } |
| if (!CPU_INST_AS32) | if (!(sd->flag & CPU_DESC_FLAG_WRITABLE)) { |
| madr &= 0xffff; | cpu_memorywrite_check(sd, offset, 4, |
| for (;;) { | |
| if ((sd->flag & CPU_DESC_WRITABLE) | |
| || (madr <= sd->u.seg.segend - 3)) { | |
| addr = CPU_STAT_SREGBASE(idx) + madr; | |
| if (!CPU_STAT_PM) { | |
| /* real mode */ | |
| cpu_memorywrite_d(addr, val); | |
| } else { | |
| /* protected mode */ | |
| cpu_lmemorywrite_d(addr, val); | |
| } | |
| return; | |
| } | |
| cpu_memorywrite_check(sd, madr, 4, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); |
| } | |
| /*NOTREACHED*/ | |
| } | |
| /* | |
| * physical address memory function | |
| */ | |
| void MEMCALL | |
| cpu_memorywrite_d(DWORD address, DWORD value) | |
| { | |
| DWORD adr = address & CPU_STAT_ADRSMASK; | |
| if (adr < LOWMEM - 3) { | |
| __i286_memorywrite_d(adr, value); | |
| } else if (adr < LOWMEM) { | |
| cpu_memorywrite_w(adr, value & 0xffff); | |
| cpu_memorywrite_w(adr + 2, (value >> 16) & 0xffff); | |
| } else { | |
| adr -= LOWMEM; | |
| if (adr < extmem_size - 3) { | |
| STOREINTELDWORD(cpumem + adr, value); | |
| } else if (adr < extmem_size) { | |
| cpu_memorywrite_w(adr, value & 0xffff); | |
| cpu_memorywrite_w(adr + 2, (value >> 16) & 0xffff); | |
| } | |
| } | |
| } | |
| void MEMCALL | |
| cpu_memorywrite_w(DWORD address, WORD value) | |
| { | |
| DWORD adr = address & CPU_STAT_ADRSMASK; | |
| if (adr < LOWMEM - 1) { | |
| __i286_memorywrite_w(adr, value); | |
| } else if (adr < LOWMEM) { | |
| __i286_memorywrite(adr, value & 0xff); | |
| cpumem[adr - (LOWMEM - 1)] = (value >> 8) & 0xff; | |
| } else { | |
| adr -= LOWMEM; | |
| if (adr < extmem_size - 1) { | |
| STOREINTELWORD(cpumem + adr, value); | |
| } else if (adr == extmem_size - 1) { | |
| cpumem[adr] = value & 0xff; | |
| } | |
| } | |
| } | |
| void MEMCALL | |
| cpu_memorywrite(DWORD address, BYTE value) | |
| { | |
| DWORD adr = address & CPU_STAT_ADRSMASK; | |
| if (adr < LOWMEM) { | |
| __i286_memorywrite(adr, value); | |
| } else { | } else { |
| adr -= LOWMEM; | switch (sd->type) { |
| if (adr < extmem_size) { | case 6: case 7: |
| cpumem[adr] = value; | if (offset - 3 <= sd->u.seg.limit) { |
| } | if (idx == CPU_SS_INDEX) |
| } | exc = SS_EXCEPTION; |
| } | else |
| exc = GP_EXCEPTION; | |
| DWORD MEMCALL | goto err; |
| cpu_memoryread_d(DWORD address) | } |
| { | break; |
| DWORD adr = address & CPU_STAT_ADRSMASK; | |
| DWORD val; | |
| if (adr < LOWMEM - 3) { | default: |
| val = __i286_memoryread_d(adr); | if (offset > sd->u.seg.limit - 3) { |
| } else if (adr < LOWMEM) { | if (idx == CPU_SS_INDEX) |
| val = cpu_memoryread_w(adr); | exc = SS_EXCEPTION; |
| val |= (DWORD)cpu_memoryread_w(adr + 2) << 16; | else |
| } else { | exc = GP_EXCEPTION; |
| adr -= LOWMEM; | goto err; |
| if (adr < extmem_size - 3) { | } |
| val = LOADINTELDWORD(cpumem + adr); | break; |
| } else { | |
| val = cpu_memoryread_w(adr); | |
| val |= (DWORD)cpu_memoryread_w(adr + 2) << 16; | |
| } | } |
| } | } |
| return val; | addr = CPU_STAT_SREGBASE(idx) + offset; |
| } | if (!CPU_STAT_PM) { |
| /* real mode */ | |
| WORD MEMCALL | cpu_memorywrite_d(addr, val); |
| cpu_memoryread_w(DWORD address) | |
| { | |
| DWORD adr = address & CPU_STAT_ADRSMASK; | |
| WORD val; | |
| if (adr < LOWMEM - 1) { | |
| val = __i286_memoryread_w(adr); | |
| } else if (adr < LOWMEM) { | |
| val = cpu_memoryread(adr); | |
| val |= (WORD)cpumem[adr - (LOWMEM - 1)] << 8; | |
| } else { | } else { |
| adr -= LOWMEM; | /* protected mode */ |
| if (adr < extmem_size - 1) { | cpu_lmemorywrite_d(addr, val, CPU_STAT_USER_MODE); |
| val = LOADINTELWORD(cpumem + adr); | |
| } else if (adr == extmem_size - 1) { | |
| val = 0xff00 | cpumem[adr]; | |
| } else { | |
| val = (WORD)-1; | |
| } | |
| } | } |
| return val; | return; |
| } | |
| BYTE MEMCALL | err: |
| cpu_memoryread(DWORD address) | EXCEPTION(exc, 0); |
| { | |
| DWORD adr = address & CPU_STAT_ADRSMASK; | |
| BYTE val; | |
| if (adr < LOWMEM) { | |
| val = __i286_memoryread(adr); | |
| } else { | |
| adr -= LOWMEM; | |
| if (adr < extmem_size) { | |
| val = cpumem[adr]; | |
| } else { | |
| val = (BYTE)-1; | |
| } | |
| } | |
| return val; | |
| } | } |