| version 1.3, 2004/01/14 16:14:49 | version 1.6, 2004/01/26 15:23:55 | 
| Line 69  load_tr(WORD selector) | Line 69  load_tr(WORD selector) | 
 | } | } | 
 |  |  | 
 | #if defined(DEBUG) | #if defined(DEBUG) | 
| { | tr_dump(task_sel.selector, task_sel.desc.u.seg.segbase, task_sel.desc.u.seg.limit); | 
| DWORD v; |  | 
| DWORD i; |  | 
|  |  | 
| for (i = 0; i < task_sel.desc.u.seg.limit; i += 4) { |  | 
| v = cpu_lmemoryread_d(task_sel.desc.u.seg.segbase + i); |  | 
| VERBOSE(("task_sel: %08x: %08x", task_sel.desc.u.seg.segbase + i, v)); |  | 
| } |  | 
| } |  | 
 | #endif | #endif | 
 |  |  | 
 | CPU_SET_TASK_BUSY(&task_sel.desc); | CPU_SET_TASK_BUSY(&task_sel.desc); | 
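CPU_SET_TASK_BUSY here marks the descriptor of the loaded TSS as busy, and task_switch() further down clears or sets the same flag depending on the switch type. A minimal sketch of the bit arithmetic involved, assuming the standard x86 encoding (available/busy 32-bit TSS type 0x9/0xB, 16-bit 0x1/0x3); the helper names are illustrative, not the emulator's macros:

    #include <stdio.h>

    #define TSS_TYPE_BUSY_BIT 0x02u   /* bit 1 of the descriptor type field */

    /* available 32-bit TSS type is 0x9, busy is 0xB; 16-bit uses 0x1/0x3 */
    static unsigned tss_set_busy(unsigned type) { return type | TSS_TYPE_BUSY_BIT; }
    static unsigned tss_set_free(unsigned type) { return type & ~TSS_TYPE_BUSY_BIT; }

    int main(void)
    {
        printf("32bit avail 0x%x -> busy 0x%x\n", 0x9u, tss_set_busy(0x9u));
        printf("16bit busy  0x%x -> free 0x%x\n", 0x3u, tss_set_free(0x3u));
        return 0;
    }
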
| Line 86  load_tr(WORD selector) | Line 78  load_tr(WORD selector) | 
 | } | } | 
 |  |  | 
 | void | void | 
| get_stack_from_tss(DWORD pl, WORD* new_ss, DWORD* new_esp) | get_stack_from_tss(DWORD pl, WORD *new_ss, DWORD *new_esp) | 
 | { | { | 
 | DWORD tss_stack_addr; | DWORD tss_stack_addr; | 
 |  |  | 
| switch (CPU_TR_DESC.type) { | __ASSERT(pl < 3); | 
| case CPU_SYSDESC_TYPE_TSS_BUSY_32: |  | 
|  | if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) { | 
 | tss_stack_addr = pl * 8 + 4; | tss_stack_addr = pl * 8 + 4; | 
 | if (tss_stack_addr + 7 > CPU_TR_DESC.u.seg.limit) { | if (tss_stack_addr + 7 > CPU_TR_DESC.u.seg.limit) { | 
 | EXCEPTION(TS_EXCEPTION, CPU_TR & ~3); | EXCEPTION(TS_EXCEPTION, CPU_TR & ~3); | 
| Line 99  get_stack_from_tss(DWORD pl, WORD* new_s | Line 92  get_stack_from_tss(DWORD pl, WORD* new_s | 
 | tss_stack_addr += CPU_TR_DESC.u.seg.segbase; | tss_stack_addr += CPU_TR_DESC.u.seg.segbase; | 
 | *new_esp = cpu_lmemoryread_d(tss_stack_addr); | *new_esp = cpu_lmemoryread_d(tss_stack_addr); | 
 | *new_ss = cpu_lmemoryread_w(tss_stack_addr + 4); | *new_ss = cpu_lmemoryread_w(tss_stack_addr + 4); | 
| break; | } else if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_16) { | 
|  |  | 
| case CPU_SYSDESC_TYPE_TSS_BUSY_16: |  | 
 | tss_stack_addr = pl * 4 + 2; | tss_stack_addr = pl * 4 + 2; | 
 | if (tss_stack_addr + 3 > CPU_TR_DESC.u.seg.limit) { | if (tss_stack_addr + 3 > CPU_TR_DESC.u.seg.limit) { | 
 | EXCEPTION(TS_EXCEPTION, CPU_TR & ~3); | EXCEPTION(TS_EXCEPTION, CPU_TR & ~3); | 
| Line 109  get_stack_from_tss(DWORD pl, WORD* new_s | Line 100  get_stack_from_tss(DWORD pl, WORD* new_s | 
 | tss_stack_addr += CPU_TR_DESC.u.seg.segbase; | tss_stack_addr += CPU_TR_DESC.u.seg.segbase; | 
 | *new_esp = cpu_lmemoryread_w(tss_stack_addr); | *new_esp = cpu_lmemoryread_w(tss_stack_addr); | 
 | *new_ss = cpu_lmemoryread_w(tss_stack_addr + 2); | *new_ss = cpu_lmemoryread_w(tss_stack_addr + 2); | 
| break; | } else { | 
|  | ia32_panic("get_stack_from_tss: task register is invalid (%d)\n", CPU_TR_DESC.type); | 
| default: |  | 
| ia32_panic("get_stack_from_tss: TR is invalid (%d)\n", |  | 
| CPU_TR_DESC.type); |  | 
| break; |  | 
 | } | } | 
 |  |  | 
 |  | VERBOSE(("get_stack_from_tss: pl = %d, new_esp = 0x%08x, new_ss = 0x%04x", pl, *new_esp, *new_ss)); | 
 | } | } | 
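get_stack_from_tss() reads the inner-level SS:ESP pair from the fixed stack slots of the current TSS: the stack pointer sits at pl*8 + 4 in a 32-bit TSS and at pl*4 + 2 in a 16-bit one, with the selector stored immediately after it. A standalone sketch of that offset arithmetic (the function names here are hypothetical, used only for illustration):

    #include <stdio.h>

    /* 32-bit TSS: ESPn at byte offset pl*8 + 4, SSn right after it */
    static void stack_slot_32(unsigned pl, unsigned *esp_off, unsigned *ss_off)
    {
        *esp_off = pl * 8 + 4;
        *ss_off  = *esp_off + 4;
    }

    /* 16-bit TSS: SPn at byte offset pl*4 + 2, SSn right after it */
    static void stack_slot_16(unsigned pl, unsigned *sp_off, unsigned *ss_off)
    {
        *sp_off = pl * 4 + 2;
        *ss_off = *sp_off + 2;
    }

    int main(void)
    {
        unsigned pl, sp, ss;

        for (pl = 0; pl < 3; pl++) {
            stack_slot_32(pl, &sp, &ss);
            printf("32bit TSS: ESP%u at +%u, SS%u at +%u\n", pl, sp, pl, ss);
            stack_slot_16(pl, &sp, &ss);
            printf("16bit TSS: SP%u  at +%u, SS%u at +%u\n", pl, sp, pl, ss);
        }
        return 0;
    }
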
 |  |  | 
 | WORD | WORD | 
 | get_link_selector_from_tss() | get_link_selector_from_tss() | 
 | { | { | 
 |  | WORD backlink; | 
 |  |  | 
 | if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) { | if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) { | 
 | if (4 > CPU_TR_DESC.u.seg.limit) { | if (4 > CPU_TR_DESC.u.seg.limit) { | 
| Line 131  get_link_selector_from_tss() | Line 121  get_link_selector_from_tss() | 
 | EXCEPTION(TS_EXCEPTION, CPU_TR & ~3); | EXCEPTION(TS_EXCEPTION, CPU_TR & ~3); | 
 | } | } | 
 | } else { | } else { | 
| ia32_panic("get_link_selector_from_tss: TR is invalid (%d)\n", | ia32_panic("get_link_selector_from_tss: task register is invalid (%d)\n", CPU_TR_DESC.type); | 
| CPU_TR_DESC.type); |  | 
| return 0;       /* compiler happy */ |  | 
 | } | } | 
 |  |  | 
| return cpu_lmemoryread_w(CPU_TR_DESC.u.seg.segbase); | backlink = cpu_lmemoryread_w(CPU_TR_DESC.u.seg.segbase); | 
|  | VERBOSE(("get_link_selector_from_tss: backlink selector = 0x%04x", backlink)); | 
|  | return backlink; | 
 | } | } | 
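The back link read here lives in word 0 of the TSS; task_switch() below writes the outgoing TR selector into that word on CALL/INTR switches so that a later IRET can find the task to return to. A small illustration of that round trip against a plain in-memory buffer (the buffer and the selector value are made up for the example):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        unsigned char  tss[104];              /* pretend 32-bit TSS image    */
        unsigned short outgoing_tr = 0x0028;  /* hypothetical TR selector    */
        unsigned short backlink;

        memset(tss, 0, sizeof(tss));

        /* CALL/INTR path: store the outgoing TR selector in word 0 */
        memcpy(&tss[0], &outgoing_tr, sizeof(outgoing_tr));

        /* IRET path: read it back to find the task to return to */
        memcpy(&backlink, &tss[0], sizeof(backlink));
        printf("backlink selector = 0x%04x\n", backlink);
        return 0;
    }
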
 |  |  | 
 | void | void | 
| Line 157  task_switch(selector_t* task_sel, int ty | Line 147  task_switch(selector_t* task_sel, int ty | 
 | DWORD task_base;        /* new task state */ | DWORD task_base;        /* new task state */ | 
 | DWORD old_flags = REAL_EFLAGREG; | DWORD old_flags = REAL_EFLAGREG; | 
 | BOOL task16; | BOOL task16; | 
| int nsreg; | DWORD nsreg; | 
| int i; | DWORD i; | 
|  |  | 
|  | VERBOSE(("task_switch: start")); | 
 |  |  | 
 | cur_base = CPU_TR_DESC.u.seg.segbase; | cur_base = CPU_TR_DESC.u.seg.segbase; | 
 | task_base = task_sel->desc.u.seg.segbase; | task_base = task_sel->desc.u.seg.segbase; | 
 | VERBOSE(("task_switch: current task base address = 0x%08x", cur_base)); | VERBOSE(("task_switch: current task base address = 0x%08x", cur_base)); | 
| VERBOSE(("task_switch: new task base address = 0x%08x", task_base)); | VERBOSE(("task_switch: new task base address     = 0x%08x", task_base)); | 
 |  |  | 
 | /* limit check */ | /* limit check */ | 
 | switch (task_sel->desc.type) { | switch (task_sel->desc.type) { | 
| Line 192  task_switch(selector_t* task_sel, int ty | Line 184  task_switch(selector_t* task_sel, int ty | 
 | break; | break; | 
 | } | } | 
 |  |  | 
 |  | #if defined(DEBUG) | 
 |  | { | 
 |  | DWORD v; | 
 |  |  | 
 |  | VERBOSE(("task_switch: new task")); | 
 |  | for (i = 0; i < task_sel->desc.u.seg.limit; i += 4) { | 
 |  | v = cpu_lmemoryread_d(task_base + i); | 
 |  | VERBOSE(("task_switch: 0x%08x: %08x", task_base + i,v)); | 
 |  | } | 
 |  | } | 
 |  | #endif | 
 |  |  | 
 | if (CPU_STAT_PAGING) { | if (CPU_STAT_PAGING) { | 
 | /* task state paging check */ | /* task state paging check */ | 
 | paging_check(cur_base, CPU_TR_DESC.u.seg.limit, CPU_PAGING_PAGE_WRITE); | paging_check(cur_base, CPU_TR_DESC.u.seg.limit, CPU_PAGING_PAGE_WRITE); | 
| Line 201  task_switch(selector_t* task_sel, int ty | Line 205  task_switch(selector_t* task_sel, int ty | 
 | /* load task state */ | /* load task state */ | 
 | memset(sreg, 0, sizeof(sreg)); | memset(sreg, 0, sizeof(sreg)); | 
 | if (!task16) { | if (!task16) { | 
| if (CPU_STAT_PAGING) { | cr3 = cpu_lmemoryread_d(task_base + 28); | 
| cr3 = cpu_lmemoryread_d(task_base + 28); |  | 
| } |  | 
 | eip = cpu_lmemoryread_d(task_base + 32); | eip = cpu_lmemoryread_d(task_base + 32); | 
 | new_flags = cpu_lmemoryread_d(task_base + 36); | new_flags = cpu_lmemoryread_d(task_base + 36); | 
 | for (i = 0; i < CPU_REG_NUM; i++) { | for (i = 0; i < CPU_REG_NUM; i++) { | 
| Line 229  task_switch(selector_t* task_sel, int ty | Line 231  task_switch(selector_t* task_sel, int ty | 
 | t = 0; | t = 0; | 
 | iobase = 0; | iobase = 0; | 
 | } | } | 
 |  |  | 
 | #if defined(DEBUG) | #if defined(DEBUG) | 
 | VERBOSE(("task_switch: %dbit task", task16 ? 16 : 32)); | VERBOSE(("task_switch: %dbit task", task16 ? 16 : 32)); | 
| VERBOSE(("task_switch: CR3 = 0x%08x", cr3)); | VERBOSE(("task_switch: CR3     = 0x%08x", cr3)); | 
| VERBOSE(("task_switch: eip = 0x%08x", eip)); | VERBOSE(("task_switch: eip     = 0x%08x", eip)); | 
| VERBOSE(("task_switch: eflags = 0x%08x", new_flags)); | VERBOSE(("task_switch: eflags  = 0x%08x", new_flags)); | 
 | for (i = 0; i < CPU_REG_NUM; i++) { | for (i = 0; i < CPU_REG_NUM; i++) { | 
 | VERBOSE(("task_switch: regs[%d] = 0x%08x", i, regs[i])); | VERBOSE(("task_switch: regs[%d] = 0x%08x", i, regs[i])); | 
 | } | } | 
 | VERBOSE(("task_switch: nsreg = %d", nsreg)); |  | 
 | for (i = 0; i < nsreg; i++) { | for (i = 0; i < nsreg; i++) { | 
 | VERBOSE(("task_switch: sreg[%d] = 0x%04x", i, sreg[i])); | VERBOSE(("task_switch: sreg[%d] = 0x%04x", i, sreg[i])); | 
 | } | } | 
| VERBOSE(("task_switch: ldtr = 0x%04x", ldtr)); | VERBOSE(("task_switch: ldtr    = 0x%04x", ldtr)); | 
| VERBOSE(("task_switch: t = 0x%04x", t)); | VERBOSE(("task_switch: t       = 0x%04x", t)); | 
| VERBOSE(("task_switch: iobase = 0x%04x", iobase)); | VERBOSE(("task_switch: iobase  = 0x%04x", iobase)); | 
 | #endif | #endif | 
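The fixed offsets used throughout task_switch() for a 32-bit task (CR3 at +28, EIP at +32, EFLAGS at +36, the segment selectors at +72 and LDTR at +96) follow the Intel 32-bit TSS layout. A sketch of that layout as a plain C struct with the offsets checked; the type itself is illustrative, not one from the source tree:

    /* Illustrative 32-bit TSS layout; every field is naturally aligned,
     * so the offsets come out right without any packing pragma. */
    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        uint16_t backlink, res0;   /* +0   previous task link              */
        uint32_t esp0;             /* +4   ring-0 stack pointer            */
        uint16_t ss0, res1;        /* +8   ring-0 stack selector           */
        uint32_t esp1;             /* +12                                  */
        uint16_t ss1, res2;        /* +16                                  */
        uint32_t esp2;             /* +20                                  */
        uint16_t ss2, res3;        /* +24                                  */
        uint32_t cr3;              /* +28                                  */
        uint32_t eip;              /* +32                                  */
        uint32_t eflags;           /* +36                                  */
        uint32_t regs[8];          /* +40  EAX,ECX,EDX,EBX,ESP,EBP,ESI,EDI */
        uint32_t sregs[6];         /* +72  ES,CS,SS,DS,FS,GS (low 16 bits) */
        uint32_t ldtr;             /* +96  LDT selector (low 16 bits)      */
        uint16_t t;                /* +100 debug trap flag (bit 0)         */
        uint16_t iobase;           /* +102 I/O permission bitmap base      */
    } tss32_t;

    int main(void)
    {
        /* the offsets hard-coded in task_switch() fall out of this layout */
        assert(offsetof(tss32_t, cr3)    == 28);
        assert(offsetof(tss32_t, eip)    == 32);
        assert(offsetof(tss32_t, eflags) == 36);
        assert(offsetof(tss32_t, regs)   == 40);
        assert(offsetof(tss32_t, sregs)  == 72);
        assert(offsetof(tss32_t, ldtr)   == 96);
        assert(offsetof(tss32_t, iobase) == 102);
        assert(sizeof(tss32_t)           == 104);
        return 0;
    }
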
 |  |  | 
 | /* if IRET or JMP, the busy flag in the current task must be cleared */ | /* if IRET or JMP, the busy flag in the current task must be cleared */ |
| Line 279  task_switch(selector_t* task_sel, int ty | Line 281  task_switch(selector_t* task_sel, int ty | 
 | for (i = 0; i < nsreg; i++) { | for (i = 0; i < nsreg; i++) { | 
 | cpu_lmemorywrite_d(cur_base + 72 + i * 4, CPU_REGS_SREG(i)); | cpu_lmemorywrite_d(cur_base + 72 + i * 4, CPU_REGS_SREG(i)); | 
 | } | } | 
| cpu_lmemorywrite_d(cur_base + 96, CPU_LDTR); | cpu_lmemorywrite_w(cur_base + 96, CPU_LDTR); | 
 | } else { | } else { | 
 | cpu_lmemorywrite_w(cur_base + 14, CPU_IP); | cpu_lmemorywrite_w(cur_base + 14, CPU_IP); | 
 | cpu_lmemorywrite_w(cur_base + 16, (WORD)old_flags); | cpu_lmemorywrite_w(cur_base + 16, (WORD)old_flags); | 
| Line 292  task_switch(selector_t* task_sel, int ty | Line 294  task_switch(selector_t* task_sel, int ty | 
 | cpu_lmemorywrite_w(cur_base + 42, CPU_LDTR); | cpu_lmemorywrite_w(cur_base + 42, CPU_LDTR); | 
 | } | } | 
 |  |  | 
 |  | #if defined(DEBUG) | 
 |  | { | 
 |  | DWORD v; | 
 |  |  | 
 |  | VERBOSE(("task_switch: current task")); | 
 |  | for (i = 0; i < CPU_TR_DESC.u.seg.limit; i += 4) { | 
 |  | v = cpu_lmemoryread_d(cur_base + i); | 
 |  | VERBOSE(("task_switch: 0x%08x: %08x", cur_base + i, v)); | 
 |  | } | 
 |  | } | 
 |  | #endif | 
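The 16-bit path above writes back only the dynamic fields of the outgoing task: IP at +14, FLAGS at +16 and LDTR at +42, which match the classic 16-bit (80286) TSS layout. A corresponding sketch, again an illustrative type rather than one taken from the emulator:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        uint16_t backlink;     /* +0   previous task link        */
        uint16_t sp0, ss0;     /* +2   ring-0 stack              */
        uint16_t sp1, ss1;     /* +6   ring-1 stack              */
        uint16_t sp2, ss2;     /* +10  ring-2 stack              */
        uint16_t ip;           /* +14                            */
        uint16_t flags;        /* +16                            */
        uint16_t regs[8];      /* +18  AX,CX,DX,BX,SP,BP,SI,DI   */
        uint16_t sregs[4];     /* +34  ES,CS,SS,DS               */
        uint16_t ldtr;         /* +42  LDT selector              */
    } tss16_t;

    int main(void)
    {
        assert(offsetof(tss16_t, ip)    == 14);
        assert(offsetof(tss16_t, flags) == 16);
        assert(offsetof(tss16_t, regs)  == 18);
        assert(offsetof(tss16_t, sregs) == 34);
        assert(offsetof(tss16_t, ldtr)  == 42);
        assert(sizeof(tss16_t)          == 44);
        return 0;
    }
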
 | /* set back link selector */ | /* set back link selector */ | 
 | switch (type) { | switch (type) { | 
 | case TASK_SWITCH_CALL: | case TASK_SWITCH_CALL: | 
 | case TASK_SWITCH_INTR: | case TASK_SWITCH_INTR: | 
 | /* set back link selector */ | /* set back link selector */ | 
| cpu_lmemorywrite_d(task_base, CPU_TR); | cpu_lmemorywrite_w(task_base, CPU_TR); | 
 | break; | break; | 
 |  |  | 
 | case TASK_SWITCH_IRET: | case TASK_SWITCH_IRET: | 
| Line 312  task_switch(selector_t* task_sel, int ty | Line 325  task_switch(selector_t* task_sel, int ty | 
 |  |  | 
 | /* Now task switching! */ | /* Now task switching! */ | 
 |  |  | 
| /* if CALL, INTR, set EFLAG image NT_FLAG */ | /* if CALL, INTR, set EFLAGS image NT_FLAG */ | 
 | /* if CALL, INTR, JMP set busy flag */ | /* if CALL, INTR, JMP set busy flag */ | 
 | switch (type) { | switch (type) { | 
 | case TASK_SWITCH_CALL: | case TASK_SWITCH_CALL: | 
| Line 342  task_switch(selector_t* task_sel, int ty | Line 355  task_switch(selector_t* task_sel, int ty | 
 | CPU_TR_DESC = task_sel->desc; | CPU_TR_DESC = task_sel->desc; | 
 |  |  | 
 | /* load task state (CR3, EFLAGS, EIP, GPR, segreg, LDTR) */ | /* load task state (CR3, EFLAGS, EIP, GPR, segreg, LDTR) */ |
| if (CPU_STAT_PAGING) { |  | 
| /* XXX setCR3()? */ | /* set new CR3 */ | 
| CPU_CR3 = cr3 & 0xfffff018; | if (!task16) { | 
| tlb_flush(FALSE); | set_CR3(cr3); | 
 | } | } | 
 |  |  | 
| /* set new EFLAGS, EIP, GPR, segment register, LDTR */ | /* set new EFLAGS */ | 
 | set_eflags(new_flags, I_FLAG|IOPL_FLAG|RF_FLAG|VM_FLAG|VIF_FLAG|VIP_FLAG); | set_eflags(new_flags, I_FLAG|IOPL_FLAG|RF_FLAG|VM_FLAG|VIF_FLAG|VIP_FLAG); | 
 |  |  | 
 |  | /* set new EIP, GPR */ | 
 | CPU_PREV_EIP = CPU_EIP = eip; | CPU_PREV_EIP = CPU_EIP = eip; | 
 | for (i = 0; i < CPU_REG_NUM; i++) { | for (i = 0; i < CPU_REG_NUM; i++) { | 
 | CPU_REGS_DWORD(i) = regs[i]; | CPU_REGS_DWORD(i) = regs[i]; | 
| Line 359  task_switch(selector_t* task_sel, int ty | Line 374  task_switch(selector_t* task_sel, int ty | 
 | CPU_STAT_SREG_CLEAR(i); | CPU_STAT_SREG_CLEAR(i); | 
 | } | } | 
 |  |  | 
| /* load LDTR */ | /* load new LDTR */ | 
 | load_ldtr(ldtr, TS_EXCEPTION); | load_ldtr(ldtr, TS_EXCEPTION); | 
 |  |  | 
| /* load CS */ | /* set new segment register */ | 
| rv = parse_selector(&cs_sel, sreg[CPU_CS_INDEX]); | if (CPU_STAT_VM86) { | 
| if (rv < 0) { | /* VM86 */ | 
| VERBOSE(("task_switch: load CS failure (sel = 0x%04x, rv = %d)", sreg[CPU_CS_INDEX], rv)); | /* clear 32bit */ | 
| EXCEPTION(TS_EXCEPTION, cs_sel.idx); | CPU_STATSAVE.cpu_inst_default.op_32 = | 
| } | CPU_STATSAVE.cpu_inst_default.as_32 = 0; | 
|  | CPU_STAT_SS32 = 0; | 
| /* CS register must be code segment */ | CPU_STAT_CPL = task_sel->desc.dpl; | 
| if (!cs_sel.desc.s || !cs_sel.desc.u.seg.c) { |  | 
| EXCEPTION(TS_EXCEPTION, cs_sel.idx); | for (i = 0; i < nsreg; i++) { | 
| } | CPU_STAT_SREG_INIT(i); | 
|  | load_segreg(i, sreg[i], TS_EXCEPTION); | 
| /* check privilege level */ |  | 
| if (!cs_sel.desc.u.seg.ec) { |  | 
| /* non-conforming code segment */ |  |
| if (cs_sel.desc.dpl != cs_sel.rpl) { |  | 
| EXCEPTION(TS_EXCEPTION, cs_sel.idx); |  | 
 | } | } | 
 | } else { | } else { | 
| /* conforming code segment */ | /* load CS */ |
| if (cs_sel.desc.dpl < cs_sel.rpl) { | rv = parse_selector(&cs_sel, sreg[CPU_CS_INDEX]); | 
|  | if (rv < 0) { | 
|  | VERBOSE(("task_switch: load CS failure (sel = 0x%04x, rv = %d)", sreg[CPU_CS_INDEX], rv)); | 
 | EXCEPTION(TS_EXCEPTION, cs_sel.idx); | EXCEPTION(TS_EXCEPTION, cs_sel.idx); | 
 | } | } | 
 | } |  | 
 |  |  | 
| /* CS segment is not present */ | /* CS register must be code segment */ | 
| rv = selector_is_not_present(&cs_sel); | if (!cs_sel.desc.s || !cs_sel.desc.u.seg.c) { | 
| if (rv < 0) { | EXCEPTION(TS_EXCEPTION, cs_sel.idx); | 
| EXCEPTION(NP_EXCEPTION, cs_sel.idx); | } | 
| } |  | 
 |  |  | 
| /* Now loading CS register */ | /* check privilege level */ | 
| load_cs(cs_sel.selector, &cs_sel.desc, cs_sel.desc.dpl); | if (!cs_sel.desc.u.seg.ec) { | 
|  | /* non-conforming code segment */ |
|  | if (cs_sel.desc.dpl != cs_sel.rpl) { | 
|  | EXCEPTION(TS_EXCEPTION, cs_sel.idx); | 
|  | } | 
|  | } else { | 
|  | /* conforming code segment */ |
|  | if (cs_sel.desc.dpl < cs_sel.rpl) { | 
|  | EXCEPTION(TS_EXCEPTION, cs_sel.idx); | 
|  | } | 
|  | } | 
 |  |  | 
| /* load ES, SS, DS, FS, GS segment register */ | /* code segment is not present */ | 
| for (i = 0; i < nsreg; i++) { | rv = selector_is_not_present(&cs_sel); | 
| if (i != CPU_CS_INDEX) { | if (rv < 0) { | 
| load_segreg(i, sreg[i], TS_EXCEPTION); | EXCEPTION(NP_EXCEPTION, cs_sel.idx); | 
|  | } | 
|  |  | 
|  | /* Now loading CS register */ | 
|  | load_cs(cs_sel.selector, &cs_sel.desc, cs_sel.desc.dpl); | 
|  |  | 
|  | /* load ES, SS, DS, FS, GS segment register */ | 
|  | for (i = 0; i < nsreg; i++) { | 
|  | if (i != CPU_CS_INDEX) { | 
|  | load_segreg(i, sreg[i], TS_EXCEPTION); | 
|  | } | 
 | } | } | 
 | } | } | 
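The protected-mode branch above validates CS before any register is loaded: the selector must reference a code segment, and the DPL/RPL relation depends on the conforming bit. A condensed predicate that mirrors those checks exactly as written in the new column (non-conforming: DPL must equal RPL; conforming: fault only when DPL < RPL); the helper and struct names are illustrative only:

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
        bool     s;            /* 1 = code/data descriptor          */
        bool     code;         /* executable segment                */
        bool     conforming;   /* C bit of the code descriptor      */
        unsigned dpl, rpl;
    } cs_check_t;

    static bool new_cs_acceptable(const cs_check_t *cs)
    {
        if (!cs->s || !cs->code)
            return false;              /* must be a code segment           */
        if (!cs->conforming)
            return cs->dpl == cs->rpl; /* non-conforming: DPL == RPL       */
        return cs->dpl >= cs->rpl;     /* conforming: fault when DPL < RPL */
    }

    int main(void)
    {
        cs_check_t cs = { true, true, false, 3, 3 };
        printf("non-conforming DPL=3 RPL=3 -> %d\n", new_cs_acceptable(&cs));
        cs.dpl = 0;
        printf("non-conforming DPL=0 RPL=3 -> %d\n", new_cs_acceptable(&cs));
        return 0;
    }
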
 |  |  | 
| Line 416  task_switch(selector_t* task_sel, int ty | Line 446  task_switch(selector_t* task_sel, int ty | 
 | CPU_STAT_IOLIMIT = 0; | CPU_STAT_IOLIMIT = 0; | 
 | } | } | 
 |  |  | 
| /* running new task */ | /* out of range */ | 
| SET_EIP(eip); | if (CPU_EIP > CPU_STAT_CS_LIMIT) { | 
|  | VERBOSE(("task_switch: new_ip is out of range. new_ip = %08x, limit = %08x", CPU_EIP, CPU_STAT_CS_LIMIT)); | 
|  | EXCEPTION(GP_EXCEPTION, 0); | 
|  | } | 
|  |  | 
|  | VERBOSE(("task_switch: done.")); | 
 | } | } |
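The new version replaces SET_EIP() with an explicit range check at the end of the switch: once the new task state is in place, EIP must not exceed the new CS limit or #GP(0) is raised. A trivial standalone form of that check (names are illustrative):

    #include <stdio.h>

    /* EIP is compared against the (inclusive) CS segment limit */
    static int eip_in_cs_limit(unsigned long eip, unsigned long cs_limit)
    {
        return eip <= cs_limit;
    }

    int main(void)
    {
        printf("%d\n", eip_in_cs_limit(0x1000UL, 0xffffUL));   /* 1: inside  */
        printf("%d\n", eip_in_cs_limit(0x10000UL, 0xffffUL));  /* 0: outside */
        return 0;
    }
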