| version 1.12, 2004/03/05 14:17:35 | version 1.17, 2004/03/25 15:08:32 |
|---|---|
| Line 1 | Line 1 |
| /* $Id$ */ | /* $Id$ */ |
| /* | /* |
| * Copyright (c) 2002-2003 NONAKA Kimihiro | * Copyright (c) 2002-2004 NONAKA Kimihiro |
| * All rights reserved. | * All rights reserved. |
| * | * |
| * Redistribution and use in source and binary forms, with or without | * Redistribution and use in source and binary forms, with or without |
| Line 154 cpu_memorywrite_check(descriptor_t *sd, | Line 154 cpu_memorywrite_check(descriptor_t *sd, |
| sd->flag |= CPU_DESC_FLAG_WRITABLE; | sd->flag |= CPU_DESC_FLAG_WRITABLE; |
| } | } |
| BOOL | void |
| cpu_stack_push_check(descriptor_t *sd, UINT32 esp, UINT length) | cpu_stack_push_check(UINT16 s, descriptor_t *sd, UINT32 esp, UINT length) |
| { | { |
| UINT32 limit; | UINT32 limit; |
| if (CPU_STAT_PM) { | if (CPU_STAT_PM) { |
| if (!sd->valid || !sd->p) | if (!sd->valid || !sd->p) { |
| return FALSE; | VERBOSE(("cpu_stack_push_check: valid = %d, present = %d", sd->valid, sd->p)); |
| if (!sd->s || sd->u.seg.c || !sd->u.seg.wr) | EXCEPTION(SS_EXCEPTION, s & 0xfffc); |
| return FALSE; | } |
| | if (!sd->s \|\| sd->u.seg.c \|\| !sd->u.seg.wr) { |
| | VERBOSE(("cpu_stack_push_check: s = %d, c = %d, wr = %d", sd->s, sd->u.seg.c, sd->u.seg.wr)); |
| | EXCEPTION(SS_EXCEPTION, s & 0xfffc); |
| | } |
| if (!sd->d) { | if (!sd->d) { |
| esp &= 0xffff; | |
| limit = 0xffff; | limit = 0xffff; |
| } else { | } else { |
| limit = 0xffffffff; | limit = 0xffffffff; |
| Line 176 cpu_stack_push_check(descriptor_t *sd, U | Line 179 cpu_stack_push_check(descriptor_t *sd, U |
| if ((esp == 0) | if ((esp == 0) |
| || (esp < length) | || (esp < length) |
| || (esp - length <= sd->u.seg.limit) | || (esp - length <= sd->u.seg.limit) |
| || (esp > limit)) | || (esp > limit)) { |
| return FALSE; | VERBOSE(("cpu_stack_push_check: expand-down, esp = %08x, length = %08x", esp, length)); |
| VERBOSE(("cpu_stack_push_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit)); | |
| VERBOSE(("cpu_stack_push_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend)); | |
| EXCEPTION(SS_EXCEPTION, s & 0xfffc); | |
| } | |
| } else { | } else { |
| /* expand-up stack */ | /* expand-up stack */ |
| if (esp == 0) { | if (esp == 0) { |
| if ((sd->d && (sd->u.seg.segend != 0xffffffff)) | if ((sd->d && (sd->u.seg.segend != 0xffffffff)) |
| || (!sd->d && (sd->u.seg.segend != 0xffff))) | || (!sd->d && (sd->u.seg.segend != 0xffff))) { |
| return FALSE; | VERBOSE(("cpu_stack_push_check: expand-up, esp = %08x, length = %08x", esp, length)); |
| VERBOSE(("cpu_stack_push_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit)); | |
| VERBOSE(("cpu_stack_push_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend)); | |
| EXCEPTION(SS_EXCEPTION, s & 0xfffc); | |
| } | |
| } else { | } else { |
| if ((esp < length) | if ((esp < length) |
| || (esp - 1 > sd->u.seg.limit)) | || (esp - 1 > sd->u.seg.limit)) { |
| return FALSE; | VERBOSE(("cpu_stack_push_check: expand-up, esp = %08x, length = %08x", esp, length)); |
| VERBOSE(("cpu_stack_push_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit)); | |
| VERBOSE(("cpu_stack_push_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend)); | |
| EXCEPTION(SS_EXCEPTION, s & 0xfffc); | |
| } | |
| } | } |
| } | } |
| } | } |
| return TRUE; | |
| } | } |
| BOOL | void |
| cpu_stack_pop_check(descriptor_t *sd, UINT32 esp, UINT length) | cpu_stack_pop_check(UINT16 s, descriptor_t *sd, UINT32 esp, UINT length) |
| { | { |
| UINT32 limit; | UINT32 limit; |
| if (CPU_STAT_PM) { | if (CPU_STAT_PM) { |
| if (!sd->valid || !sd->p) | if (!sd->valid || !sd->p) { |
| return FALSE; | VERBOSE(("cpu_stack_pop_check: valid = %d, present = %d", sd->valid, sd->p)); |
| if (!sd->s || sd->u.seg.c || !sd->u.seg.wr) | EXCEPTION(SS_EXCEPTION, s & 0xfffc); |
| return FALSE; | } |
| | if (!sd->s \|\| sd->u.seg.c \|\| !sd->u.seg.wr) { |
| | VERBOSE(("cpu_stack_pop_check: s = %d, c = %d, wr = %d", sd->s, sd->u.seg.c, sd->u.seg.wr)); |
| | EXCEPTION(SS_EXCEPTION, s & 0xfffc); |
| | } |
| if (!sd->d) { | if (!sd->d) { |
| esp &= 0xffff; | |
| limit = 0xffff; | limit = 0xffff; |
| } else { | } else { |
| limit = 0xffffffff; | limit = 0xffffffff; |
| Line 214 cpu_stack_pop_check(descriptor_t *sd, UI | Line 231 cpu_stack_pop_check(descriptor_t *sd, UI |
| if (sd->u.seg.ec) { | if (sd->u.seg.ec) { |
| /* expand-down stack */ | /* expand-down stack */ |
| if ((esp == limit) | if ((esp == limit) |
| || ((limit - esp) + 1 < length)) | || ((limit - esp) + 1 < length)) { |
| return FALSE; | VERBOSE(("cpu_stack_pop_check: expand-down, esp = %08x, length = %08x", esp, length)); |
| | VERBOSE(("cpu_stack_pop_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit)); |
| | VERBOSE(("cpu_stack_pop_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend)); |
| | EXCEPTION(SS_EXCEPTION, s & 0xfffc); |
| | } |
| } else { | } else { |
| /* expand-up stack */ | /* expand-up stack */ |
| if ((esp == limit) | if ((esp == limit) |
| || (sd->u.seg.segend == 0) | || (sd->u.seg.segend == 0) |
| || (esp > sd->u.seg.limit) | || (esp > sd->u.seg.limit) |
| || ((sd->u.seg.limit - esp) + 1 < length)) | || ((sd->u.seg.limit - esp) + 1 < length)) { |
| return FALSE; | VERBOSE(("cpu_stack_pop_check: expand-up, esp = %08x, length = %08x", esp, length)); |
| VERBOSE(("cpu_stack_pop_check: limit = %08x, seglimit = %08x", limit, sd->u.seg.limit)); | |
| VERBOSE(("cpu_stack_pop_check: segbase = %08x, segend = %08x", sd->u.seg.segbase, sd->u.seg.segend)); | |
| EXCEPTION(SS_EXCEPTION, s & 0xfffc); | |
| } | |
| } | } |
| } | } |
| return TRUE; | |
| } | } |
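
Both stack checkers change shape in 1.17: instead of returning a `BOOL` for the caller to test, they now take the stack selector `s` and raise `#SS` directly, with error code `s & 0xfffc`. The limit arithmetic itself is unchanged. A standalone sketch of that test follows; the names (`stack_desc`, `push_would_fault`) are hypothetical, and the `esp == 0` full-wraparound case that the real expand-up path special-cases is omitted:

```c
/* Sketch of the #SS limit test, under hypothetical names (the emulator
 * raises EXCEPTION(SS_EXCEPTION, s & 0xfffc) instead of returning a flag). */
#include <stdint.h>
#include <stdbool.h>

typedef struct {
	bool     big;         /* D/B bit: 1 = 32-bit stack, 0 = 16-bit */
	bool     expand_down; /* E/C bit of the descriptor */
	uint32_t limit;       /* segment limit */
} stack_desc;

/* true when pushing `length` bytes at `esp` would fault */
static bool
push_would_fault(const stack_desc *sd, uint32_t esp, uint32_t length)
{
	uint32_t top = sd->big ? 0xffffffff : 0xffff;

	if (sd->expand_down) {
		/* valid stack bytes live in (limit, top] */
		return esp == 0 || esp < length
		    || esp - length <= sd->limit || esp > top;
	}
	/* expand-up: valid stack bytes live in [0, limit]; this omits the
	 * esp == 0 wraparound case the real code special-cases */
	return esp < length || esp - 1 > sd->limit;
}
```

| version 1.12, 2004/03/05 14:17:35 | version 1.17, 2004/03/25 15:08:32 |
|---|---|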
| Line 241 cpu_prefetch(UINT32 address) | Line 265 cpu_prefetch(UINT32 address) |
| UINT offset = address & CPU_PREFETCHQ_MASK; | UINT offset = address & CPU_PREFETCHQ_MASK; |
| UINT length = CPU_PREFETCH_QUEUE_LENGTH - offset; | UINT length = CPU_PREFETCH_QUEUE_LENGTH - offset; |
| cpu_memory_access_la_region(address, length, CPU_PAGE_READ_CODE, CPU_STAT_USER_MODE, CPU_PREFETCHQ + offset); | cpu_memory_access_la_region(address, length, CPU_PAGE_READ_CODE|CPU_STAT_USER_MODE, CPU_PREFETCHQ + offset); |
| CPU_PREFETCHQ_REMAIN = length; | CPU_PREFETCHQ_REMAIN = (SINT8)length; |
| } | } |
| INLINE static MEMCALL UINT8 | INLINE static MEMCALL UINT8 |
| Line 276 cpu_prefetchq_3(UINT32 address) | Line 300 cpu_prefetchq_3(UINT32 address) |
| CPU_PREFETCHQ_REMAIN -= 3; | CPU_PREFETCHQ_REMAIN -= 3; |
| p = CPU_PREFETCHQ + (address & CPU_PREFETCHQ_MASK); | p = CPU_PREFETCHQ + (address & CPU_PREFETCHQ_MASK); |
| v = LOADINTELWORD(p); | v = LOADINTELWORD(p); |
| v += (UINT32)*p << 16; | v += ((UINT32)p[2]) << 16; |
| return v; | return v; |
| } | } |
| Line 293 cpu_prefetchq_d(UINT32 address) | Line 317 cpu_prefetchq_d(UINT32 address) |
| } | } |
| #endif /* IA32_SUPPORT_PREFETCH_QUEUE */ | #endif /* IA32_SUPPORT_PREFETCH_QUEUE */ |
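
The one-character change in `cpu_prefetchq_3` above fixes a real bug: the high byte of a 3-byte fetch was taken from `p[0]` (`*p`) instead of `p[2]`. A minimal standalone illustration of the corrected little-endian assembly (`load_le24` is a hypothetical name; the emulator reads the low word via `LOADINTELWORD`):

```c
/* Assemble a 3-byte little-endian value, mirroring the 1.17 fix. */
#include <stdint.h>

static uint32_t
load_le24(const uint8_t *p)
{
	uint32_t v = (uint32_t)p[0] | ((uint32_t)p[1] << 8); /* LOADINTELWORD */
	v += (uint32_t)p[2] << 16;                           /* was: *p << 16 */
	return v;
}
```

| version 1.12, 2004/03/05 14:17:35 | version 1.17, 2004/03/25 15:08:32 |
|---|---|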
| | #if defined(IA32_SUPPORT_DEBUG_REGISTER) |
| | INLINE static void |
| | check_memory_break_point(UINT32 address, UINT length, UINT rw) |
| | { |
| | int i; |
| | if (CPU_STAT_BP && !(CPU_EFLAG & RF_FLAG)) { |
| | for (i = 0; i < CPU_DEBUG_REG_INDEX_NUM; i++) { |
| | if ((CPU_STAT_BP & (1 << i)) |
| | && (CPU_DR7_GET_RW(i) & rw) |
| | && ((address <= CPU_DR(i) && address + length > CPU_DR(i)) |
| | \|\| (address > CPU_DR(i) && address < CPU_DR(i) + CPU_DR7_GET_LEN(i)))) { |
| | CPU_STAT_BP_EVENT \|= CPU_STAT_BP_EVENT_B(i); |
| | } |
| | } |
| | } |
| | } |
| | #else |
| | #define check_memory_break_point(address, length, rw) |
| | #endif |
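
The new `check_memory_break_point` arms data watchpoints: when any breakpoint is enabled (`CPU_STAT_BP`) and RF is clear, each memory access is tested for overlap with the DR0–DR3 ranges. The two-clause overlap test is the core of it; a self-contained sketch with hypothetical names (`watchpoint_hit`, with `dr_addr`/`dr_len` standing in for `CPU_DR(i)` and `CPU_DR7_GET_LEN(i)`):

```c
/* Same two-clause test as the emulator: the access range
 * [address, address + length) hits the watchpoint range
 * [dr_addr, dr_addr + dr_len) when either the access starts at or
 * before DR and reaches it, or starts inside the DR range. */
#include <stdint.h>
#include <stdbool.h>

static bool
watchpoint_hit(uint32_t address, uint32_t length,
    uint32_t dr_addr, uint32_t dr_len)
{
	return (address <= dr_addr && address + length > dr_addr)
	    || (address > dr_addr && address < dr_addr + dr_len);
}
```

| version 1.12, 2004/03/05 14:17:35 | version 1.17, 2004/03/25 15:08:32 |
|---|---|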
| /* | /* |
| * code fetch | * code fetch |
| */ | */ |
| Line 306 cpu_codefetch(UINT32 offset) | Line 352 cpu_codefetch(UINT32 offset) |
| if (offset <= sd->u.seg.limit) { | if (offset <= sd->u.seg.limit) { |
| addr = sd->u.seg.segbase + offset; | addr = sd->u.seg.segbase + offset; |
| #if defined(IA32_SUPPORT_PREFETCH_QUEUE) | #if defined(IA32_SUPPORT_PREFETCH_QUEUE) |
| if (CPU_PREFETCHQ_REMAIN == 0) { | if (CPU_PREFETCHQ_REMAIN <= 0) { |
| cpu_prefetch(addr); | cpu_prefetch(addr); |
| } | } |
| return cpu_prefetchq(addr); | return cpu_prefetchq(addr); |
| #else /* IA32_SUPPORT_PREFETCH_QUEUE */ | #else /* !IA32_SUPPORT_PREFETCH_QUEUE */ |
| if (!CPU_STAT_PM) | if (!CPU_STAT_PAGING) |
| return cpu_memoryread(addr); | return cpu_memoryread(addr); |
| return cpu_lcmemoryread(addr); | return cpu_linear_memory_read_b(addr, CPU_PAGE_READ_CODE | CPU_STAT_USER_MODE); |
| #endif /* IA32_SUPPORT_PREFETCH_QUEUE */ | #endif /* IA32_SUPPORT_PREFETCH_QUEUE */ |
| } | } |
| EXCEPTION(GP_EXCEPTION, 0); | EXCEPTION(GP_EXCEPTION, 0); |
| Line 333 cpu_codefetch_w(UINT32 offset) | Line 379 cpu_codefetch_w(UINT32 offset) |
| if (offset <= sd->u.seg.limit - 1) { | if (offset <= sd->u.seg.limit - 1) { |
| addr = sd->u.seg.segbase + offset; | addr = sd->u.seg.segbase + offset; |
| #if defined(IA32_SUPPORT_PREFETCH_QUEUE) | #if defined(IA32_SUPPORT_PREFETCH_QUEUE) |
| if (CPU_PREFETCHQ_REMAIN == 0) { | if (CPU_PREFETCHQ_REMAIN <= 0) { |
| cpu_prefetch(addr); | cpu_prefetch(addr); |
| } | } |
| if (CPU_PREFETCHQ_REMAIN >= 2) { | if (CPU_PREFETCHQ_REMAIN >= 2) { |
| Line 343 cpu_codefetch_w(UINT32 offset) | Line 389 cpu_codefetch_w(UINT32 offset) |
| v = cpu_prefetchq(addr); | v = cpu_prefetchq(addr); |
| addr++; | addr++; |
| cpu_prefetch(addr); | cpu_prefetch(addr); |
| v |= cpu_prefetchq(addr) << 8; | v += (UINT16)cpu_prefetchq(addr) << 8; |
| return v; | return v; |
| #else /* IA32_SUPPORT_PREFETCH_QUEUE */ | #else /* !IA32_SUPPORT_PREFETCH_QUEUE */ |
| if (!CPU_STAT_PM) | if (!CPU_STAT_PAGING) |
| return cpu_memoryread_w(addr); | return cpu_memoryread_w(addr); |
| return cpu_lcmemoryread_w(addr); | return cpu_linear_memory_read_w(addr, CPU_PAGE_READ_CODE | CPU_STAT_USER_MODE); |
| #endif /* IA32_SUPPORT_PREFETCH_QUEUE */ | #endif /* IA32_SUPPORT_PREFETCH_QUEUE */ |
| } | } |
| EXCEPTION(GP_EXCEPTION, 0); | EXCEPTION(GP_EXCEPTION, 0); |
| Line 368 cpu_codefetch_d(UINT32 offset) | Line 414 cpu_codefetch_d(UINT32 offset) |
| if (offset <= sd->u.seg.limit - 3) { | if (offset <= sd->u.seg.limit - 3) { |
| addr = sd->u.seg.segbase + offset; | addr = sd->u.seg.segbase + offset; |
| #if defined(IA32_SUPPORT_PREFETCH_QUEUE) | #if defined(IA32_SUPPORT_PREFETCH_QUEUE) |
| if (CPU_PREFETCHQ_REMAIN == 0) { | if (CPU_PREFETCHQ_REMAIN <= 0) { |
| cpu_prefetch(addr); | cpu_prefetch(addr); |
| } | } |
| if (CPU_PREFETCHQ_REMAIN >= 4) { | if (CPU_PREFETCHQ_REMAIN >= 4) { |
| Line 377 cpu_codefetch_d(UINT32 offset) | Line 423 cpu_codefetch_d(UINT32 offset) |
| switch (CPU_PREFETCHQ_REMAIN) { | switch (CPU_PREFETCHQ_REMAIN) { |
| case 1: | case 1: |
| v = cpu_prefetchq(addr); | v = cpu_prefetchq(addr); |
| cpu_prefetch(addr + 1); | addr++; |
| v += (UINT32)cpu_prefetchq_3(addr + 1) << 8; | cpu_prefetch(addr); |
| | v += (UINT32)cpu_prefetchq_3(addr) << 8; |
| break; | break; |
| case 2: | case 2: |
| v = cpu_prefetchq_w(addr); | v = cpu_prefetchq_w(addr); |
| cpu_prefetch(addr + 2); | addr += 2; |
| v += (UINT32)cpu_prefetchq_w(addr + 2) << 16; | cpu_prefetch(addr); |
| | v += (UINT32)cpu_prefetchq_w(addr) << 16; |
| break; | break; |
| case 3: | case 3: |
| v = cpu_prefetchq_3(addr); | v = cpu_prefetchq_3(addr); |
| cpu_prefetch(addr + 3); | addr += 3; |
| v += (UINT32)cpu_prefetchq(addr + 3) << 24; | cpu_prefetch(addr); |
| | v += (UINT32)cpu_prefetchq(addr) << 24; |
| | break; |
| | default: |
| | ia32_panic("cpu_codefetch_d: remain bytes is invalid"); |
| | v = 0; /* compiler happy */ |
| break; | break; |
| } | } |
| return v; | return v; |
| } | } |
| #else /* IA32_SUPPORT_PREFETCH_QUEUE */ | #else /* !IA32_SUPPORT_PREFETCH_QUEUE */ |
| if (!CPU_STAT_PM) | if (!CPU_STAT_PAGING) |
| return cpu_memoryread_d(addr); | return cpu_memoryread_d(addr); |
| return cpu_lcmemoryread_d(addr); | return cpu_linear_memory_read_d(addr, CPU_PAGE_READ_CODE | CPU_STAT_USER_MODE); |
| #endif /* IA32_SUPPORT_PREFETCH_QUEUE */ | #endif /* IA32_SUPPORT_PREFETCH_QUEUE */ |
| } | } |
| EXCEPTION(GP_EXCEPTION, 0); | EXCEPTION(GP_EXCEPTION, 0); |
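
The reworked `switch` in `cpu_codefetch_d` stitches a 32-bit fetch across a prefetch-queue refill: the low `remain` bytes (1–3) come from the old queue contents, the refill supplies the rest, and 1.17 adds a `default:` panic for impossible counts. A standalone sketch of the stitching, with hypothetical names (`fetch_dword_straddling`, `queue_old`/`queue_new`; the emulator reads both halves through `CPU_PREFETCHQ` via `cpu_prefetchq()`/`_w()`/`_3()`):

```c
#include <stdint.h>

/* Read n little-endian bytes from p. */
static uint32_t
load_le(const uint8_t *p, unsigned n)
{
	uint32_t v = 0;
	unsigned i;

	for (i = 0; i < n; i++)
		v |= (uint32_t)p[i] << (8 * i);
	return v;
}

/* remain must be 1..3: low bytes from the pre-refill queue,
 * high bytes from the refilled one. */
static uint32_t
fetch_dword_straddling(const uint8_t *queue_old,
    const uint8_t *queue_new, unsigned remain)
{
	uint32_t v = load_le(queue_old, remain);

	v |= load_le(queue_new, 4 - remain) << (8 * remain);
	return v;
}
```

| version 1.12, 2004/03/05 14:17:35 | version 1.17, 2004/03/25 15:08:32 |
|---|---|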
| Line 407 cpu_codefetch_d(UINT32 offset) | Line 461 cpu_codefetch_d(UINT32 offset) |
| /* | /* |
| * virtual address -> linear address | * virtual address memory access functions |
| */ | */ |
| UINT8 MEMCALL | #include "cpu_mem.mcr" |
| cpu_vmemoryread(int idx, UINT32 offset) | |
| { | |
| descriptor_t *sd; | |
| UINT32 addr; | |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | |
| sd = &CPU_STAT_SREG(idx); | |
| if (!sd->valid) { | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| if (!(sd->flag & CPU_DESC_FLAG_READABLE)) { | |
| cpu_memoryread_check(sd, offset, 1, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | |
| } else { | |
| switch (sd->type) { | |
| case 4: case 5: case 6: case 7: | |
| if (offset <= sd->u.seg.limit) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| default: | |
| if (offset > sd->u.seg.limit) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| } | |
| } | |
| addr = sd->u.seg.segbase + offset; | |
| if (!CPU_STAT_PM) | |
| return cpu_memoryread(addr); | |
| return cpu_lmemoryread(addr, CPU_STAT_USER_MODE); | |
| err: | |
| EXCEPTION(exc, 0); | |
| return 0; /* compiler happy */ | |
| } | |
| UINT16 MEMCALL | |
| cpu_vmemoryread_w(int idx, UINT32 offset) | |
| { | |
| descriptor_t *sd; | |
| UINT32 addr; | |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | |
| sd = &CPU_STAT_SREG(idx); | |
| if (!sd->valid) { | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| if (!(sd->flag & CPU_DESC_FLAG_READABLE)) { | |
| cpu_memoryread_check(sd, offset, 2, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | |
| } else { | |
| switch (sd->type) { | |
| case 4: case 5: case 6: case 7: | |
| if (offset - 1 <= sd->u.seg.limit) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| default: | |
| if (offset > sd->u.seg.limit - 1) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| } | |
| } | |
| addr = sd->u.seg.segbase + offset; | |
| if (!CPU_STAT_PM) | |
| return cpu_memoryread_w(addr); | |
| return cpu_lmemoryread_w(addr, CPU_STAT_USER_MODE); | |
| err: | |
| EXCEPTION(exc, 0); | |
| return 0; /* compiler happy */ | |
| } | |
| UINT32 MEMCALL | |
| cpu_vmemoryread_d(int idx, UINT32 offset) | |
| { | |
| descriptor_t *sd; | |
| UINT32 addr; | |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | |
| sd = &CPU_STAT_SREG(idx); | |
| if (!sd->valid) { | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| if (!(sd->flag & CPU_DESC_FLAG_READABLE)) { | |
| cpu_memoryread_check(sd, offset, 4, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | |
| } else { | |
| switch (sd->type) { | |
| case 4: case 5: case 6: case 7: | |
| if (offset - 3 <= sd->u.seg.limit) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| default: | |
| if (offset > sd->u.seg.limit - 3) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| } | |
| } | |
| addr = sd->u.seg.segbase + offset; | |
| if (!CPU_STAT_PM) | |
| return cpu_memoryread_d(addr); | |
| return cpu_lmemoryread_d(addr, CPU_STAT_USER_MODE); | |
| err: | |
| EXCEPTION(exc, 0); | |
| return 0; /* compiler happy */ | |
| } | |
| /* vaddr memory write */ | |
| void MEMCALL | |
| cpu_vmemorywrite(int idx, UINT32 offset, UINT8 val) | |
| { | |
| descriptor_t *sd; | |
| UINT32 addr; | |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | |
| sd = &CPU_STAT_SREG(idx); | VIRTUAL_ADDRESS_MEMORY_ACCESS_FUNCTION(b, UINT8, 1) |
| if (!sd->valid) { | VIRTUAL_ADDRESS_MEMORY_ACCESS_FUNCTION(w, UINT16, 2) |
| exc = GP_EXCEPTION; | VIRTUAL_ADDRESS_MEMORY_ACCESS_FUNCTION(d, UINT32, 4) |
| goto err; | |
| } | |
| if (!(sd->flag & CPU_DESC_FLAG_WRITABLE)) { | |
| cpu_memorywrite_check(sd, offset, 1, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | |
| } else { | |
| switch (sd->type) { | |
| case 6: case 7: | |
| if (offset <= sd->u.seg.limit) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| default: | |
| if (offset > sd->u.seg.limit) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| } | |
| } | |
| addr = sd->u.seg.segbase + offset; | |
| if (!CPU_STAT_PM) { | |
| /* real mode */ | |
| cpu_memorywrite(addr, val); | |
| } else { | |
| /* protected mode */ | |
| cpu_lmemorywrite(addr, val, CPU_STAT_USER_MODE); | |
| } | |
| return; | |
| err: | |
| EXCEPTION(exc, 0); | |
| } | |
| void MEMCALL | |
| cpu_vmemorywrite_w(int idx, UINT32 offset, UINT16 val) | |
| { | |
| descriptor_t *sd; | |
| UINT32 addr; | |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | |
| sd = &CPU_STAT_SREG(idx); | |
| if (!sd->valid) { | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| if (!(sd->flag & CPU_DESC_FLAG_WRITABLE)) { | |
| cpu_memorywrite_check(sd, offset, 2, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | |
| } else { | |
| switch (sd->type) { | |
| case 6: case 7: | |
| if (offset - 1 <= sd->u.seg.limit) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| default: | |
| if (offset > sd->u.seg.limit - 1) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| } | |
| } | |
| addr = sd->u.seg.segbase + offset; | |
| if (!CPU_STAT_PM) { | |
| /* real mode */ | |
| cpu_memorywrite_w(addr, val); | |
| } else { | |
| /* protected mode */ | |
| cpu_lmemorywrite_w(addr, val, CPU_STAT_USER_MODE); | |
| } | |
| return; | |
| err: | |
| EXCEPTION(exc, 0); | |
| } | |
| void MEMCALL | |
| cpu_vmemorywrite_d(int idx, UINT32 offset, UINT32 val) | |
| { | |
| descriptor_t *sd; | |
| UINT32 addr; | |
| int exc; | |
| __ASSERT((unsigned int)idx < CPU_SEGREG_NUM); | |
| sd = &CPU_STAT_SREG(idx); | |
| if (!sd->valid) { | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| if (!(sd->flag & CPU_DESC_FLAG_WRITABLE)) { | |
| cpu_memorywrite_check(sd, offset, 4, | |
| (idx == CPU_SS_INDEX) ? SS_EXCEPTION : GP_EXCEPTION); | |
| } else { | |
| switch (sd->type) { | |
| case 6: case 7: | |
| if (offset - 3 <= sd->u.seg.limit) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| default: | |
| if (offset > sd->u.seg.limit - 3) { | |
| if (idx == CPU_SS_INDEX) | |
| exc = SS_EXCEPTION; | |
| else | |
| exc = GP_EXCEPTION; | |
| goto err; | |
| } | |
| break; | |
| } | |
| } | |
| addr = sd->u.seg.segbase + offset; | |
| if (!CPU_STAT_PM) { | |
| /* real mode */ | |
| cpu_memorywrite_d(addr, val); | |
| } else { | |
| /* protected mode */ | |
| cpu_lmemorywrite_d(addr, val, CPU_STAT_USER_MODE); | |
| } | |
| return; | |
| err: | |
| EXCEPTION(exc, 0); | |
| } |