| version 1.8, 2004/02/04 13:24:35 | version 1.16, 2004/03/08 12:56:22 |
|---|---|
| Line 33 | Line 33 |
| void | void |
| load_tr(WORD selector) | load_tr(UINT16 selector) |
| { | { |
| selector_t task_sel; | selector_t task_sel; |
| int rv; | int rv; |
| int i; | |
| UINT16 iobase; | |
| rv = parse_selector_user(&task_sel, selector); | rv = parse_selector(&task_sel, selector); |
| if (rv < 0 || task_sel.ldt || task_sel.desc.s) { | if (rv < 0 || task_sel.ldt || task_sel.desc.s) { |
| EXCEPTION(GP_EXCEPTION, task_sel.idx); | EXCEPTION(GP_EXCEPTION, task_sel.idx); |
| } | } |
| Line 49 load_tr(WORD selector) | Line 51 load_tr(WORD selector) |
| if (task_sel.desc.u.seg.limit < 0x2b) { | if (task_sel.desc.u.seg.limit < 0x2b) { |
| EXCEPTION(TS_EXCEPTION, task_sel.idx); | EXCEPTION(TS_EXCEPTION, task_sel.idx); |
| } | } |
| iobase = 0; | |
| break; | break; |
| case CPU_SYSDESC_TYPE_TSS_32: | case CPU_SYSDESC_TYPE_TSS_32: |
| if (task_sel.desc.u.seg.limit < 0x67) { | if (task_sel.desc.u.seg.limit < 0x67) { |
| EXCEPTION(TS_EXCEPTION, task_sel.idx); | EXCEPTION(TS_EXCEPTION, task_sel.idx); |
| } | } |
| iobase = cpu_kmemoryread_w(task_sel.desc.u.seg.segbase + 102); | |
| break; | break; |
| default: | default: |
| EXCEPTION(GP_EXCEPTION, task_sel.idx); | EXCEPTION(GP_EXCEPTION, task_sel.idx); |
| iobase = 0; /* compiler happy */ | |
| break; | break; |
| } | } |
| Line 75 load_tr(WORD selector) | Line 80 load_tr(WORD selector) |
| CPU_SET_TASK_BUSY(task_sel.selector, &task_sel.desc); | CPU_SET_TASK_BUSY(task_sel.selector, &task_sel.desc); |
| CPU_TR = task_sel.selector; | CPU_TR = task_sel.selector; |
| CPU_TR_DESC = task_sel.desc; | CPU_TR_DESC = task_sel.desc; |
| /* I/O deny bitmap */ | |
| if (task_sel.desc.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) { | |
| if (iobase != 0 && iobase < task_sel.desc.u.seg.limit) { | |
| CPU_STAT_IOLIMIT = (UINT16)(task_sel.desc.u.seg.limit - iobase); | |
| CPU_STAT_IOADDR = task_sel.desc.u.seg.segbase + iobase; | |
| } else { | |
| CPU_STAT_IOLIMIT = 0; | |
| } | |
| } else { | |
| CPU_STAT_IOLIMIT = 0; | |
| } | |
| /* clear local break point flags */ | |
| CPU_DR7 &= ~(CPU_DR7_L(0)|CPU_DR7_L(1)|CPU_DR7_L(2)|CPU_DR7_L(3)); | |
| CPU_STAT_BP = 0; | |
| for (i = 0; i < CPU_DEBUG_REG_INDEX_NUM; i++) { | |
| if (CPU_DR7 & CPU_DR7_G(i)) { | |
| CPU_STAT_BP |= (1 << i); | |
| } | |
| } | |
| } | } |
| void | void |
| get_stack_from_tss(DWORD pl, WORD *new_ss, DWORD *new_esp) | get_stack_pointer_from_tss(UINT pl, UINT16 *new_ss, UINT32 *new_esp) |
| { | { |
| DWORD tss_stack_addr; | UINT32 tss_stack_addr; |
| __ASSERT(pl < 3); | __ASSERT(pl < 3); |
| Line 101 get_stack_from_tss(DWORD pl, WORD *new_s | Line 127 get_stack_from_tss(DWORD pl, WORD *new_s |
| *new_esp = cpu_kmemoryread_w(tss_stack_addr); | *new_esp = cpu_kmemoryread_w(tss_stack_addr); |
| *new_ss = cpu_kmemoryread_w(tss_stack_addr + 2); | *new_ss = cpu_kmemoryread_w(tss_stack_addr + 2); |
| } else { | } else { |
| ia32_panic("get_stack_from_tss: task register is invalid (%d)\n", CPU_TR_DESC.type); | ia32_panic("get_stack_pointer_from_tss: task register is invalid (%d)\n", CPU_TR_DESC.type); |
| } | } |
| VERBOSE(("get_stack_from_tss: pl = %d, new_esp = 0x%08x, new_ss = 0x%04x", pl, *new_esp, *new_ss)); | VERBOSE(("get_stack_pointer_from_tss: pl = %d, new_esp = 0x%08x, new_ss = 0x%04x", pl, *new_esp, *new_ss)); |
| } | } |
| WORD | UINT16 |
| get_link_selector_from_tss() | get_backlink_selector_from_tss(void) |
| { | { |
| WORD backlink; | UINT16 backlink; |
| if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) { | if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) { |
| if (4 > CPU_TR_DESC.u.seg.limit) { | if (4 > CPU_TR_DESC.u.seg.limit) { |
| Line 121 get_link_selector_from_tss() | Line 147 get_link_selector_from_tss() |
| EXCEPTION(TS_EXCEPTION, CPU_TR & ~3); | EXCEPTION(TS_EXCEPTION, CPU_TR & ~3); |
| } | } |
| } else { | } else { |
| ia32_panic("get_link_selector_from_tss: task register is invalid (%d)\n", CPU_TR_DESC.type); | ia32_panic("get_backlink_selector_from_tss: task register is invalid (%d)\n", CPU_TR_DESC.type); |
| } | } |
| backlink = cpu_kmemoryread_w(CPU_TR_DESC.u.seg.segbase); | backlink = cpu_kmemoryread_w(CPU_TR_DESC.u.seg.segbase); |
| VERBOSE(("get_link_selector_from_tss: backlink selector = 0x%04x", backlink)); | VERBOSE(("get_backlink_selector_from_tss: backlink selector = 0x%04x", backlink)); |
| return backlink; | return backlink; |
| } | } |
| void | void |
| task_switch(selector_t* task_sel, int type) | task_switch(selector_t *task_sel, task_switch_type_t type) |
| { | { |
| DWORD regs[CPU_REG_NUM]; | UINT32 regs[CPU_REG_NUM]; |
| DWORD eip; | UINT32 eip; |
| DWORD new_flags; | UINT32 new_flags; |
| DWORD cr3 = 0; | UINT32 mask; |
| WORD sreg[CPU_SEGREG_NUM]; | UINT32 cr3 = 0; |
| WORD ldtr; | UINT16 sreg[CPU_SEGREG_NUM]; |
| WORD t, iobase; | UINT16 ldtr; |
| | UINT16 iobase; |
| | #if defined(IA32_SUPPORT_DEBUG_REGISTER) |
| | UINT16 t; |
| | #endif |
| selector_t cs_sel; | selector_t cs_sel; |
| int rv; | int rv; |
| DWORD cur_base; /* current task state */ | UINT32 cur_base; /* current task state */ |
| DWORD task_base; /* new task state */ | UINT32 task_base; /* new task state */ |
| DWORD old_flags = REAL_EFLAGREG; | UINT32 old_flags = REAL_EFLAGREG; |
| BOOL task16; | BOOL task16; |
| DWORD nsreg; | UINT nsreg; |
| DWORD i; | UINT i; |
| VERBOSE(("task_switch: start")); | VERBOSE(("task_switch: start")); |
| cur_base = CPU_TR_DESC.u.seg.segbase; | |
| task_base = task_sel->desc.u.seg.segbase; | |
| VERBOSE(("task_switch: current task base address = 0x%08x", cur_base)); | |
| VERBOSE(("task_switch: new task base address = 0x%08x", task_base)); | |
| /* limit check */ | /* limit check */ |
| switch (task_sel->desc.type) { | switch (task_sel->desc.type) { |
| case CPU_SYSDESC_TYPE_TSS_32: | case CPU_SYSDESC_TYPE_TSS_32: |
| Line 184 task_switch(selector_t* task_sel, int ty | Line 209 task_switch(selector_t* task_sel, int ty |
| break; | break; |
| } | } |
| | cur_base = CPU_TR_DESC.u.seg.segbase; |
| | task_base = task_sel->desc.u.seg.segbase; |
| | VERBOSE(("task_switch: cur task (%04x) = 0x%08x:%08x", CPU_TR, cur_base, CPU_TR_DESC.u.seg.limit)); |
| | VERBOSE(("task_switch: new task (%04x) = 0x%08x:%08x", task_sel->selector, task_base, task_sel->desc.u.seg.limit)); |
| | VERBOSE(("task_switch: %dbit task switch", task16 ? 16 : 32)); |
| #if defined(MORE_DEBUG) | #if defined(MORE_DEBUG) |
| { | { |
| DWORD v; | UINT32 v; |
| VERBOSE(("task_switch: new task")); | VERBOSE(("task_switch: new task")); |
| for (i = 0; i < task_sel->desc.u.seg.limit; i += 4) { | for (i = 0; i < task_sel->desc.u.seg.limit; i += 4) { |
| Line 217 task_switch(selector_t* task_sel, int ty | Line 248 task_switch(selector_t* task_sel, int ty |
| sreg[i] = cpu_kmemoryread_w(task_base + 72 + i * 4); | sreg[i] = cpu_kmemoryread_w(task_base + 72 + i * 4); |
| } | } |
| ldtr = cpu_kmemoryread_w(task_base + 96); | ldtr = cpu_kmemoryread_w(task_base + 96); |
| | #if defined(IA32_SUPPORT_DEBUG_REGISTER) |
| t = cpu_kmemoryread_w(task_base + 100); | t = cpu_kmemoryread_w(task_base + 100); |
| t &= 1; | if (t & 1) { |
| | CPU_STAT_BP_EVENT |= CPU_STAT_BP_EVENT_TASK; |
| | } |
| | #endif |
| iobase = cpu_kmemoryread_w(task_base + 102); | iobase = cpu_kmemoryread_w(task_base + 102); |
| } else { | } else { |
| eip = cpu_kmemoryread_w(task_base + 14); | eip = cpu_kmemoryread_w(task_base + 14); |
| Line 230 task_switch(selector_t* task_sel, int ty | Line 265 task_switch(selector_t* task_sel, int ty |
| sreg[i] = cpu_kmemoryread_w(task_base + 34 + i * 2); | sreg[i] = cpu_kmemoryread_w(task_base + 34 + i * 2); |
| } | } |
| ldtr = cpu_kmemoryread_w(task_base + 42); | ldtr = cpu_kmemoryread_w(task_base + 42); |
| t = 0; | |
| iobase = 0; | iobase = 0; |
| } | } |
| #if defined(MORE_DEBUG) | #if defined(DEBUG) |
| VERBOSE(("task_switch: %dbit task", task16 ? 16 : 32)); | VERBOSE(("task_switch: current task")); |
| VERBOSE(("task_switch: CR3 = 0x%08x", cr3)); | VERBOSE(("task_switch: eip = 0x%08x", CPU_EIP)); |
| | VERBOSE(("task_switch: eflags = 0x%08x", old_flags)); |
| | for (i = 0; i < CPU_REG_NUM; i++) { |
| | VERBOSE(("task_switch: regs[%d] = 0x%08x", i, CPU_REGS_DWORD(i))); |
| | } |
| | for (i = 0; i < nsreg; i++) { |
| | VERBOSE(("task_switch: sreg[%d] = 0x%04x", i, CPU_REGS_SREG(i))); |
| | } |
| | VERBOSE(("task_switch: new task")); |
| | if (!task16) { |
| | VERBOSE(("task_switch: CR3 = 0x%08x", cr3)); |
| | } |
| VERBOSE(("task_switch: eip = 0x%08x", eip)); | VERBOSE(("task_switch: eip = 0x%08x", eip)); |
| VERBOSE(("task_switch: eflags = 0x%08x", new_flags)); | VERBOSE(("task_switch: eflags = 0x%08x", new_flags)); |
| for (i = 0; i < CPU_REG_NUM; i++) { | for (i = 0; i < CPU_REG_NUM; i++) { |
| Line 246 task_switch(selector_t* task_sel, int ty | Line 292 task_switch(selector_t* task_sel, int ty |
| VERBOSE(("task_switch: sreg[%d] = 0x%04x", i, sreg[i])); | VERBOSE(("task_switch: sreg[%d] = 0x%04x", i, sreg[i])); |
| } | } |
| VERBOSE(("task_switch: ldtr = 0x%04x", ldtr)); | VERBOSE(("task_switch: ldtr = 0x%04x", ldtr)); |
| VERBOSE(("task_switch: t = 0x%04x", t)); | if (!task16) { |
| VERBOSE(("task_switch: iobase = 0x%04x", iobase)); | VERBOSE(("task_switch: t = 0x%04x", t)); |
| | VERBOSE(("task_switch: iobase = 0x%04x", iobase)); |
| | } |
| #endif | #endif |
| /* if IRET or JMP, clear busy flag in this task: need */ | /* if IRET or JMP, clear busy flag in this task: need */ |
| Line 284 task_switch(selector_t* task_sel, int ty | Line 332 task_switch(selector_t* task_sel, int ty |
| } | } |
| } else { | } else { |
| cpu_kmemorywrite_w(cur_base + 14, CPU_IP); | cpu_kmemorywrite_w(cur_base + 14, CPU_IP); |
| cpu_kmemorywrite_w(cur_base + 16, (WORD)old_flags); | cpu_kmemorywrite_w(cur_base + 16, (UINT16)old_flags); |
| for (i = 0; i < CPU_REG_NUM; i++) { | for (i = 0; i < CPU_REG_NUM; i++) { |
| cpu_kmemorywrite_w(cur_base + 18 + i * 2, CPU_REGS_WORD(i)); | cpu_kmemorywrite_w(cur_base + 18 + i * 2, CPU_REGS_WORD(i)); |
| } | } |
| Line 295 task_switch(selector_t* task_sel, int ty | Line 343 task_switch(selector_t* task_sel, int ty |
| #if defined(MORE_DEBUG) | #if defined(MORE_DEBUG) |
| { | { |
| DWORD v; | UINT32 v; |
| VERBOSE(("task_switch: current task")); | VERBOSE(("task_switch: current task")); |
| for (i = 0; i < CPU_TR_DESC.u.seg.limit; i += 4) { | for (i = 0; i < CPU_TR_DESC.u.seg.limit; i += 4) { |
| Line 304 task_switch(selector_t* task_sel, int ty | Line 352 task_switch(selector_t* task_sel, int ty |
| } | } |
| } | } |
| #endif | #endif |
| /* set back link selector */ | /* set back link selector */ |
| switch (type) { | switch (type) { |
| case TASK_SWITCH_CALL: | case TASK_SWITCH_CALL: |
| Line 337 task_switch(selector_t* task_sel, int ty | Line 386 task_switch(selector_t* task_sel, int ty |
| break; | break; |
| case TASK_SWITCH_IRET: | case TASK_SWITCH_IRET: |
| | #if defined(DEBUG) |
| /* check busy flag is active */ | /* check busy flag is active */ |
| if (task_sel->desc.valid) { | if (task_sel->desc.valid) { |
| DWORD h; | UINT32 h; |
| h = cpu_kmemoryread_d(task_sel->addr + 4); | h = cpu_kmemoryread_d(task_sel->addr + 4); |
| if ((h & CPU_TSS_H_BUSY) == 0) { | if ((h & CPU_TSS_H_BUSY) == 0) { |
| VERBOSE(("task_switch: new task is not busy")); | ia32_panic("task_switch: new task is not busy"); |
| } | } |
| } | } |
| | #endif |
| break; | break; |
| default: | default: |
| Line 368 task_switch(selector_t* task_sel, int ty | Line 415 task_switch(selector_t* task_sel, int ty |
| set_CR3(cr3); | set_CR3(cr3); |
| } | } |
| /* set new EFLAGS */ | |
| set_eflags(new_flags, I_FLAG|IOPL_FLAG|RF_FLAG|VM_FLAG|VIF_FLAG|VIP_FLAG); | |
| /* set new EIP, GPR */ | /* set new EIP, GPR */ |
| CPU_PREV_EIP = CPU_EIP = eip; | CPU_PREV_EIP = CPU_EIP = eip; |
| | CPU_PREFETCH_CLEAR(); |
| for (i = 0; i < CPU_REG_NUM; i++) { | for (i = 0; i < CPU_REG_NUM; i++) { |
| CPU_REGS_DWORD(i) = regs[i]; | CPU_REGS_DWORD(i) = regs[i]; |
| } | } |
| for (i = 0; i < CPU_SEGREG_NUM; i++) { | for (i = 0; i < CPU_SEGREG_NUM; i++) { |
| CPU_REGS_SREG(i) = sreg[i]; | CPU_REGS_SREG(i) = sreg[i]; |
| CPU_STAT_SREG_CLEAR(i); | CPU_STAT_SREG_INIT(i); |
| | } |
| | /* set new EFLAGS */ |
| | mask = I_FLAG|IOPL_FLAG|RF_FLAG|VM_FLAG|VIF_FLAG|VIP_FLAG; |
| | set_eflags(new_flags, mask); |
| | /* I/O deny bitmap */ |
| | if (!task16) { |
| | if (iobase != 0 && iobase < task_sel->desc.u.seg.limit) { |
| | CPU_STAT_IOLIMIT = (UINT16)(task_sel->desc.u.seg.limit - iobase); |
| | CPU_STAT_IOADDR = task_sel->desc.u.seg.segbase + iobase; |
| | } else { |
| | CPU_STAT_IOLIMIT = 0; |
| | } |
| | } else { |
| | CPU_STAT_IOLIMIT = 0; |
| | } |
| | VERBOSE(("task_switch: ioaddr = %08x, limit = %08x", CPU_STAT_IOADDR, CPU_STAT_IOLIMIT)); |
| | #if defined(IA32_SUPPORT_DEBUG_REGISTER) |
| | /* check resume flag */ |
| | if (CPU_EFLAG & RF_FLAG) { |
| | CPU_STAT_BP_EVENT |= CPU_STAT_BP_EVENT_RF; |
| | } |
| | /* clear local break point flags */ |
| | CPU_DR7 &= ~(CPU_DR7_L(0)|CPU_DR7_L(1)|CPU_DR7_L(2)|CPU_DR7_L(3)); |
| | CPU_STAT_BP = 0; |
| | for (i = 0; i < CPU_DEBUG_REG_INDEX_NUM; i++) { |
| | if (CPU_DR7 & CPU_DR7_G(i)) { |
| | CPU_STAT_BP |= (1 << i); |
| | } |
| } | } |
| | #endif |
| /* load new LDTR */ | /* load new LDTR */ |
| load_ldtr(ldtr, TS_EXCEPTION); | load_ldtr(ldtr, TS_EXCEPTION); |
| /* set new segment register */ | /* set new segment register */ |
| if (CPU_STAT_VM86) { | if (!CPU_STAT_VM86) { |
| /* VM86 */ | /* clear segment descriptor cache */ |
| for (i = 0; i < nsreg; i++) { | for (i = 0; i < CPU_SEGREG_NUM; i++) { |
| CPU_STAT_SREG_INIT(i); | CPU_STAT_SREG_CLEAR(i); |
| load_segreg(i, sreg[i], TS_EXCEPTION); | |
| } | } |
| } else { | |
| /* load CS */ | /* load CS */ |
| rv = parse_selector_sv(&cs_sel, sreg[CPU_CS_INDEX]); | rv = parse_selector(&cs_sel, sreg[CPU_CS_INDEX]); |
| if (rv < 0) { | if (rv < 0) { |
| VERBOSE(("task_switch: load CS failure (sel = 0x%04x, rv = %d)", sreg[CPU_CS_INDEX], rv)); | VERBOSE(("task_switch: load CS failure (sel = 0x%04x, rv = %d)", sreg[CPU_CS_INDEX], rv)); |
| EXCEPTION(TS_EXCEPTION, cs_sel.idx); | EXCEPTION(TS_EXCEPTION, cs_sel.idx); |
| Line 427 task_switch(selector_t* task_sel, int ty | Line 504 task_switch(selector_t* task_sel, int ty |
| load_cs(cs_sel.selector, &cs_sel.desc, cs_sel.desc.dpl); | load_cs(cs_sel.selector, &cs_sel.desc, cs_sel.desc.dpl); |
| /* load ES, SS, DS, FS, GS segment register */ | /* load ES, SS, DS, FS, GS segment register */ |
| for (i = 0; i < nsreg; i++) { | for (i = 0; i < CPU_SEGREG_NUM; i++) { |
| if (i != CPU_CS_INDEX) { | if (i != CPU_CS_INDEX) { |
| load_segreg(i, sreg[i], TS_EXCEPTION); | load_segreg(i, sreg[i], TS_EXCEPTION); |
| } | } |
| } | } |
| } | } |
| /* I/O deny bitmap */ | |
| if (!task16) { | |
| if (task_sel->desc.u.seg.limit > iobase) { | |
| CPU_STAT_IOLIMIT = task_sel->desc.u.seg.limit - iobase; | |
| CPU_STAT_IOLIMIT *= 8; /* keep it in bit units */ | |
| CPU_STAT_IOADDR = task_sel->desc.u.seg.segbase + iobase; | |
| } else { | |
| CPU_STAT_IOLIMIT = 0; | |
| } | |
| } else { | |
| CPU_STAT_IOLIMIT = 0; | |
| } | |
| /* out of range */ | /* out of range */ |
| if (CPU_EIP > CPU_STAT_CS_LIMIT) { | if (CPU_EIP > CPU_STAT_CS_LIMIT) { |
| VERBOSE(("task_switch: new_ip is out of range. new_ip = %08x, limit = %08x", CPU_EIP, CPU_STAT_CS_LIMIT)); | VERBOSE(("task_switch: new_ip is out of range. new_ip = %08x, limit = %08x", CPU_EIP, CPU_STAT_CS_LIMIT)); |