--- np2/i386c/ia32/task.c	2011/12/17 01:36:54	1.25
+++ np2/i386c/ia32/task.c	2011/12/20 09:55:07	1.28
@@ -33,7 +33,7 @@
 #define TSS_32_LIMIT	(TSS_32_SIZE - 1)
 
 static void
-set_task_busy(UINT16 selector, descriptor_t *sdp)
+set_task_busy(UINT16 selector)
 {
 	UINT32 addr;
 	UINT32 h;
@@ -41,7 +41,6 @@ set_task_busy(UINT16 selector, descripto
 	addr = CPU_GDTR_BASE + (selector & CPU_SEGMENT_SELECTOR_INDEX_MASK);
 	h = cpu_kmemoryread_d(addr + 4);
 	if (!(h & CPU_TSS_H_BUSY)) {
-		sdp->type |= CPU_SYSDESC_TYPE_TSS_BUSY_IND;
 		h |= CPU_TSS_H_BUSY;
 		cpu_kmemorywrite_d(addr + 4, h);
 	} else {
@@ -50,7 +49,7 @@ set_task_busy(UINT16 selector, descripto
 }
 
 static void
-set_task_free(UINT16 selector, descriptor_t *sdp)
+set_task_free(UINT16 selector)
 {
 	UINT32 addr;
 	UINT32 h;
@@ -58,7 +57,6 @@ set_task_free(UINT16 selector, descripto
 	addr = CPU_GDTR_BASE + (selector & CPU_SEGMENT_SELECTOR_INDEX_MASK);
 	h = cpu_kmemoryread_d(addr + 4);
 	if (h & CPU_TSS_H_BUSY) {
-		sdp->type &= ~CPU_SYSDESC_TYPE_TSS_BUSY_IND;
 		h &= ~CPU_TSS_H_BUSY;
 		cpu_kmemorywrite_d(addr + 4, h);
 	} else {
@@ -112,16 +110,17 @@ load_tr(UINT16 selector)
 	tr_dump(task_sel.selector, task_sel.desc.u.seg.segbase, task_sel.desc.u.seg.limit);
 #endif
 
-	set_task_busy(task_sel.selector, &task_sel.desc);
+	set_task_busy(task_sel.selector);
 	CPU_TR = task_sel.selector;
 	CPU_TR_DESC = task_sel.desc;
+	CPU_TR_DESC.type |= CPU_SYSDESC_TYPE_TSS_BUSY_IND;
 
 	/* I/O deny bitmap */
 	CPU_STAT_IOLIMIT = 0;
-	if (task_sel.desc.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) {
-		if (iobase != 0 && iobase < task_sel.desc.u.seg.limit) {
-			CPU_STAT_IOLIMIT = (UINT16)(task_sel.desc.u.seg.limit - iobase);
-			CPU_STAT_IOADDR = task_sel.desc.u.seg.segbase + iobase;
+	if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) {
+		if (iobase != 0 && iobase < CPU_TR_DESC.u.seg.limit) {
+			CPU_STAT_IOLIMIT = (UINT16)(CPU_TR_DESC.u.seg.limit - iobase);
+			CPU_STAT_IOADDR = CPU_TR_DESC.u.seg.segbase + iobase;
 		}
 	}
 
@@ -202,8 +201,10 @@ task_switch(selector_t *task_sel, task_s
 	UINT16 ldtr;
 	UINT16 iobase;
 	UINT16 t;
+	int new_cpl;
 
-	selector_t cs_sel, ss_sel;
+	selector_t sreg_sel[CPU_SEGREG_NUM];
+	selector_t ldtr_sel;
 	int rv;
 
 	UINT32 cur_base, cur_paddr;	/* current task state */
@@ -334,7 +335,7 @@ task_switch(selector_t *task_sel, task_s
 		/*FALLTHROUGH*/
 	case TASK_SWITCH_JMP:
 		/* clear busy flags in current task */
-		set_task_free(CPU_TR, &CPU_TR_DESC);
+		set_task_free(CPU_TR);
 		break;
 
 	case TASK_SWITCH_CALL:
@@ -409,9 +410,9 @@ task_switch(selector_t *task_sel, task_s
 		new_flags |= NT_FLAG;
 		/*FALLTHROUGH*/
 	case TASK_SWITCH_JMP:
-		set_task_busy(task_sel->selector, &task_sel->desc);
+		set_task_busy(task_sel->selector);
 		break;
-	
+
 	case TASK_SWITCH_IRET:
 		/* check busy flag is active */
 		if (SEG_IS_VALID(&task_sel->desc)) {
@@ -431,29 +432,49 @@ task_switch(selector_t *task_sel, task_s
 	/* load task selector to CPU_TR */
 	CPU_TR = task_sel->selector;
 	CPU_TR_DESC = task_sel->desc;
-
-	/* clear BUSY flag in descriptor cache */
-	CPU_TR_DESC.type &= ~CPU_SYSDESC_TYPE_TSS_BUSY_IND;
+	CPU_TR_DESC.type |= CPU_SYSDESC_TYPE_TSS_BUSY_IND;
 
 	/* set CR0 image CPU_CR0_TS */
 	CPU_CR0 |= CPU_CR0_TS;
 
 	/*
-	 * load task state (CR3, EFLAG, EIP, GPR, segreg, LDTR)
+	 * load task state (EIP, GPR, EFLAG, segreg, CR3, LDTR)
 	 */
 
-	/* set new CR3 */
-	if (!task16 && CPU_STAT_PAGING) {
-		set_cr3(cr3);
-	}
-
 	/* set new EIP, GPR */
 	CPU_EIP = eip;
 	for (i = 0; i < CPU_REG_NUM; i++) {
 		CPU_REGS_DWORD(i) = regs[i];
 	}
+
+	CPU_CLEAR_PREV_ESP();
+
+	/* set new EFLAGS */
+	set_eflags(new_flags, I_FLAG|IOPL_FLAG|RF_FLAG|VM_FLAG|VIF_FLAG|VIP_FLAG);
+
+	/* check new segregs, ldtr */
 	for (i = 0; i < CPU_SEGREG_NUM; i++) {
-		segdesc_init(i, sreg[i], &CPU_STAT_SREG(i));
+		rv = parse_selector(&sreg_sel[i], sreg[i]);
+		if (rv < 0) {
+			VERBOSE(("task_switch: selector parse failure: index=%d (sel = 0x%04x, rv = %d)", i, sreg[i], rv));
+			EXCEPTION(TS_EXCEPTION, sreg_sel[i].idx);
+		}
+	}
+	rv = parse_selector(&ldtr_sel, ldtr);
+	if (rv < 0) {
+		VERBOSE(("task_switch: LDTR selector parse failure (sel = 0x%04x, rv = %d)", ldtr, rv));
+		EXCEPTION(TS_EXCEPTION, ldtr_sel.idx);
+	}
+
+	/* invalidate segreg, ldtr descriptor */
+	for (i = 0; i < CPU_SEGREG_NUM; i++) {
+		CPU_STAT_SREG(i).valid = 0;
+	}
+	CPU_LDTR_DESC.valid = 0;
+
+	/* set new CR3 */
+	if (!task16 && CPU_STAT_PAGING) {
+		set_cr3(cr3);
 	}
 
 	/* load new LDTR */
@@ -461,8 +482,8 @@ task_switch(selector_t *task_sel, task_s
 
 	/* I/O deny bitmap */
 	CPU_STAT_IOLIMIT = 0;
-	if (!task16 && iobase != 0 && iobase < task_sel->desc.u.seg.limit) {
-		CPU_STAT_IOLIMIT = (UINT16)(task_sel->desc.u.seg.limit - iobase);
+	if (!task16 && iobase != 0 && iobase < CPU_TR_DESC.u.seg.limit) {
+		CPU_STAT_IOLIMIT = (UINT16)(CPU_TR_DESC.u.seg.limit - iobase);
 		CPU_STAT_IOADDR = task_base + iobase;
 	}
 	VERBOSE(("task_switch: ioaddr = %08x, limit = %08x", CPU_STAT_IOADDR, CPU_STAT_IOLIMIT));
@@ -483,89 +504,87 @@ task_switch(selector_t *task_sel, task_s
 	}
 #endif
 
-	/* set new EFLAGS */
-	set_eflags(new_flags, I_FLAG|IOPL_FLAG|RF_FLAG|VM_FLAG|VIF_FLAG|VIP_FLAG);
-
 	/* set new segment register */
-	if (!CPU_STAT_VM86) {
-		/* clear segment descriptor cache */
-		for (i = 0; i < CPU_SEGREG_NUM; i++) {
-			segdesc_clear(&CPU_STAT_SREG(i));
+	new_cpl = sreg_sel[CPU_CS_INDEX].rpl;
+	if (CPU_STAT_VM86) {
+		load_ss(sreg_sel[CPU_SS_INDEX].selector,
+		    &sreg_sel[CPU_SS_INDEX].desc, new_cpl);
+		LOAD_SEGREG1(CPU_ES_INDEX, sreg_sel[CPU_ES_INDEX].selector,
+		    TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_DS_INDEX, sreg_sel[CPU_DS_INDEX].selector,
+		    TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_FS_INDEX, sreg_sel[CPU_FS_INDEX].selector,
+		    TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_GS_INDEX, sreg_sel[CPU_GS_INDEX].selector,
+		    TS_EXCEPTION);
+		load_cs(sreg_sel[CPU_CS_INDEX].selector,
+		    &sreg_sel[CPU_CS_INDEX].desc, new_cpl);
+	} else {
+		/* load SS */
+
+		/* SS must be writable data segment */
+		if (SEG_IS_SYSTEM(&sreg_sel[CPU_SS_INDEX].desc)
+		 || SEG_IS_CODE(&sreg_sel[CPU_SS_INDEX].desc)
+		 || !SEG_IS_WRITABLE_DATA(&sreg_sel[CPU_SS_INDEX].desc)) {
+			EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_SS_INDEX].idx);
 		}
 
-		/* load CS */
-		rv = parse_selector(&cs_sel, sreg[CPU_CS_INDEX]);
+		/* check privilege level */
+		if ((sreg_sel[CPU_SS_INDEX].desc.dpl != new_cpl)
+		 || (sreg_sel[CPU_SS_INDEX].rpl != new_cpl)) {
+			EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_SS_INDEX].idx);
+		}
+
+		/* stack segment is not present */
+		rv = selector_is_not_present(&sreg_sel[CPU_SS_INDEX]);
 		if (rv < 0) {
-			VERBOSE(("task_switch: load CS failure (sel = 0x%04x, rv = %d)", sreg[CPU_CS_INDEX], rv));
-			EXCEPTION(TS_EXCEPTION, cs_sel.idx);
+			EXCEPTION(SS_EXCEPTION, sreg_sel[CPU_SS_INDEX].idx);
 		}
 
+		/* Now loading SS register */
+		load_ss(sreg_sel[CPU_SS_INDEX].selector,
+		    &sreg_sel[CPU_SS_INDEX].desc, new_cpl);
+
+		/* load ES, DS, FS, GS segment register */
+		LOAD_SEGREG1(CPU_ES_INDEX, sreg_sel[CPU_ES_INDEX].selector,
+		    TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_DS_INDEX, sreg_sel[CPU_DS_INDEX].selector,
+		    TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_FS_INDEX, sreg_sel[CPU_FS_INDEX].selector,
+		    TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_GS_INDEX, sreg_sel[CPU_GS_INDEX].selector,
+		    TS_EXCEPTION);
+
+		/* load CS */
+
 		/* CS must be code segment */
-		if (SEG_IS_SYSTEM(&cs_sel.desc) || SEG_IS_DATA(&cs_sel.desc)) {
-			EXCEPTION(TS_EXCEPTION, cs_sel.idx);
+		if (SEG_IS_SYSTEM(&sreg_sel[CPU_CS_INDEX].desc)
+		 || SEG_IS_DATA(&sreg_sel[CPU_CS_INDEX].desc)) {
+			EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_CS_INDEX].idx);
 		}
 
 		/* check privilege level */
-		if (!SEG_IS_CONFORMING_CODE(&cs_sel.desc)) {
+		if (!SEG_IS_CONFORMING_CODE(&sreg_sel[CPU_CS_INDEX].desc)) {
 			/* non-confirming code segment */
-			if (cs_sel.desc.dpl != cs_sel.rpl) {
-				EXCEPTION(TS_EXCEPTION, cs_sel.idx);
+			if (sreg_sel[CPU_CS_INDEX].desc.dpl != new_cpl) {
+				EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_CS_INDEX].idx);
 			}
 		} else {
 			/* conforming code segment */
-			if (cs_sel.desc.dpl > cs_sel.rpl) {
-				EXCEPTION(TS_EXCEPTION, cs_sel.idx);
+			if (sreg_sel[CPU_CS_INDEX].desc.dpl > new_cpl) {
+				EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_CS_INDEX].idx);
 			}
 		}
 
 		/* code segment is not present */
-		rv = selector_is_not_present(&cs_sel);
-		if (rv < 0) {
-			EXCEPTION(NP_EXCEPTION, cs_sel.idx);
-		}
-
-		/* load SS */
-		rv = parse_selector(&ss_sel, sreg[CPU_SS_INDEX]);
-		if (rv < 0) {
-			VERBOSE(("task_switch: load SS failure (sel = 0x%04x, rv = %d)", sreg[CPU_SS_INDEX], rv));
-			EXCEPTION(TS_EXCEPTION, ss_sel.idx);
-		}
-
-		/* SS must be writable data segment */
-		if (SEG_IS_SYSTEM(&ss_sel.desc)
-		 || SEG_IS_CODE(&ss_sel.desc)
-		 || !SEG_IS_WRITABLE_DATA(&ss_sel.desc)) {
-			EXCEPTION(TS_EXCEPTION, ss_sel.idx);
-		}
-
-		/* check privilege level */
-		if ((ss_sel.desc.dpl != cs_sel.rpl)
-		 || (ss_sel.desc.dpl != ss_sel.rpl)) {
-			EXCEPTION(TS_EXCEPTION, ss_sel.idx);
-		}
-
-		/* stack segment is not present */
-		rv = selector_is_not_present(&ss_sel);
+		rv = selector_is_not_present(&sreg_sel[CPU_CS_INDEX]);
 		if (rv < 0) {
-			EXCEPTION(SS_EXCEPTION, ss_sel.idx);
-		}
-
-		/* Now loading CS/SS register */
-		load_cs(cs_sel.selector, &cs_sel.desc, cs_sel.rpl);
-		load_ss(ss_sel.selector, &ss_sel.desc, cs_sel.rpl);
-
-		/* load ES, DS, FS, GS segment register */
-		for (i = 0; i < CPU_SEGREG_NUM; i++) {
-			if (i != CPU_CS_INDEX || i != CPU_SS_INDEX) {
-				LOAD_SEGREG1(i, sreg[i], TS_EXCEPTION);
-			}
+			EXCEPTION(NP_EXCEPTION, sreg_sel[CPU_CS_INDEX].idx);
 		}
-	}
 
-	/* out of range */
-	if (CPU_EIP > CPU_STAT_CS_LIMIT) {
-		VERBOSE(("task_switch: new_ip is out of range. new_ip = %08x, limit = %08x", CPU_EIP, CPU_STAT_CS_LIMIT));
-		EXCEPTION(GP_EXCEPTION, 0);
+		/* Now loading CS register */
+		load_cs(sreg_sel[CPU_CS_INDEX].selector,
+		    &sreg_sel[CPU_CS_INDEX].desc, new_cpl);
 	}
 
 	VERBOSE(("task_switch: done."));