--- np2/i386c/ia32/task.c	2011/12/20 09:03:28	1.27
+++ np2/i386c/ia32/task.c	2012/01/08 11:36:06	1.33
@@ -32,7 +32,7 @@
 #define TSS_32_SIZE 104
 #define TSS_32_LIMIT (TSS_32_SIZE - 1)
 
-static void
+static void CPUCALL
 set_task_busy(UINT16 selector)
 {
 	UINT32 addr;
@@ -48,7 +48,7 @@ set_task_busy(UINT16 selector)
 	}
 }
 
-static void
+static void CPUCALL
 set_task_free(UINT16 selector)
 {
 	UINT32 addr;
@@ -64,14 +64,11 @@ set_task_free(UINT16 selector)
 	}
 }
 
-void
+void CPUCALL
 load_tr(UINT16 selector)
 {
 	selector_t task_sel;
 	int rv;
-#if defined(IA32_SUPPORT_DEBUG_REGISTER)
-	int i;
-#endif
 	UINT16 iobase;
 
 	rv = parse_selector(&task_sel, selector);
@@ -123,26 +120,15 @@ load_tr(UINT16 selector)
 			CPU_STAT_IOADDR = CPU_TR_DESC.u.seg.segbase + iobase;
 		}
 	}
-
-#if defined(IA32_SUPPORT_DEBUG_REGISTER)
-	/* clear local break point flags */
-	CPU_DR7 &= ~(CPU_DR7_L(0)|CPU_DR7_L(1)|CPU_DR7_L(2)|CPU_DR7_L(3)|CPU_DR7_LE);
-	CPU_STAT_BP = 0;
-	for (i = 0; i < CPU_DEBUG_REG_INDEX_NUM; i++) {
-		if (CPU_DR7 & CPU_DR7_G(i)) {
-			CPU_STAT_BP |= (1 << i);
-		}
-	}
-#endif
 }
 
-void
+void CPUCALL
 get_stack_pointer_from_tss(UINT pl, UINT16 *new_ss, UINT32 *new_esp)
 {
 	UINT32 tss_stack_addr;
 
 	VERBOSE(("get_stack_pointer_from_tss: pl = %d", pl));
-	VERBOSE(("CPU_TR type = %d, base = 0x%08x, limit = 0x%08x", CPU_TR_DESC.type, CPU_TR_BASE, CPU_TR_LIMIT));
+	VERBOSE(("get_stack_pointer_from_tss: CPU_TR type = %d, base = 0x%08x, limit = 0x%08x", CPU_TR_DESC.type, CPU_TR_BASE, CPU_TR_LIMIT));
 
 	__ASSERT(pl < 3);
 
@@ -165,7 +151,7 @@ get_stack_pointer_from_tss(UINT pl, UINT
 	} else {
 		ia32_panic("get_stack_pointer_from_tss: task register is invalid (%d)\n", CPU_TR_DESC.type);
 	}
-	VERBOSE(("new stack pointer = %04x:%08x", *new_ss, *new_esp));
+	VERBOSE(("get_stack_pointer_from_tss: new stack pointer = %04x:%08x", *new_ss, *new_esp));
 }
 
 UINT16
@@ -190,7 +176,7 @@ get_backlink_selector_from_tss(void)
 	return backlink;
 }
 
-void
+void CPUCALL
 task_switch(selector_t *task_sel, task_switch_type_t type)
 {
 	UINT32 regs[CPU_REG_NUM];
@@ -201,10 +187,8 @@ task_switch(selector_t *task_sel, task_s
 	UINT16 ldtr;
 	UINT16 iobase;
 	UINT16 t;
-	int new_cpl;
-	selector_t sreg_sel[CPU_SEGREG_NUM];
-	selector_t ldtr_sel;
+	selector_t cs_sel, ss_sel;
 	int rv;
 	UINT32 cur_base, cur_paddr;	/* current task state */
@@ -438,40 +422,29 @@ task_switch(selector_t *task_sel, task_s
 	CPU_CR0 |= CPU_CR0_TS;
 
 	/*
-	 * load task state (EIP, GPR, EFLAG, segreg, CR3, LDTR)
+	 * load task state (CR3, EIP, GPR, segregs, LDTR, EFLAGS)
 	 */
 
-	/* set new EIP, GPR */
+	/* set new CR3 */
+	if (!task16 && CPU_STAT_PAGING) {
+		set_cr3(cr3);
+	}
+
+	/* set new EIP, GPR, segregs */
 	CPU_EIP = eip;
 	for (i = 0; i < CPU_REG_NUM; i++) {
 		CPU_REGS_DWORD(i) = regs[i];
 	}
-
-	CPU_CLEAR_PREV_ESP();
-
-	/* set new EFLAGS */
-	set_eflags(new_flags, I_FLAG|IOPL_FLAG|RF_FLAG|VM_FLAG|VIF_FLAG|VIP_FLAG);
-
-	/* check new segregs, ldtr */
 	for (i = 0; i < CPU_SEGREG_NUM; i++) {
-		rv = parse_selector(&sreg_sel[i], sreg[i]);
-		if (rv < 0) {
-			VERBOSE(("task_switch: selector parse failure: index=%d (sel = 0x%04x, rv = %d)", i, sreg[i], rv));
-			EXCEPTION(TS_EXCEPTION, sreg_sel[i].idx);
-		}
-	}
-	rv = parse_selector(&ldtr_sel, ldtr);
-	if (rv < 0) {
-		VERBOSE(("task_switch: LDTR selector parse failure (sel = 0x%04x, rv = %d)", ldtr, rv));
-		EXCEPTION(TS_EXCEPTION, ldtr_sel.idx);
+		segdesc_init(i, sreg[i], &CPU_STAT_SREG(i));
+		/* invalidate segreg descriptor */
+		CPU_STAT_SREG(i).valid = 0;
 	}
 
-	/* set new CR3 */
-	if (!task16 && CPU_STAT_PAGING) {
-		set_cr3(cr3);
-	}
+	CPU_CLEAR_PREV_ESP();
 
 	/* load new LDTR */
+	CPU_LDTR_DESC.valid = 0;
 	load_ldtr(ldtr, TS_EXCEPTION);
 
 	/* I/O deny bitmap */
@@ -482,103 +455,79 @@ task_switch(selector_t *task_sel, task_s
 	}
 	VERBOSE(("task_switch: ioaddr = %08x, limit = %08x", CPU_STAT_IOADDR, CPU_STAT_IOLIMIT));
 
-#if defined(IA32_SUPPORT_DEBUG_REGISTER)
-	/* check resume flag */
-	if (CPU_EFLAG & RF_FLAG) {
-		CPU_STAT_BP_EVENT |= CPU_STAT_BP_EVENT_RF;
-	}
-
-	/* clear local break point flags */
-	CPU_DR7 &= ~(CPU_DR7_L(0)|CPU_DR7_L(1)|CPU_DR7_L(2)|CPU_DR7_L(3)|CPU_DR7_LE);
-	CPU_STAT_BP = 0;
-	for (i = 0; i < CPU_DEBUG_REG_INDEX_NUM; i++) {
-		if (CPU_DR7 & CPU_DR7_G(i)) {
-			CPU_STAT_BP |= (1 << i);
-		}
-	}
-#endif
+	/* set new EFLAGS */
+	set_eflags(new_flags, I_FLAG|IOPL_FLAG|RF_FLAG|VM_FLAG|VIF_FLAG|VIP_FLAG);
 
 	/* set new segment register */
-	new_cpl = sreg_sel[CPU_CS_INDEX].rpl;
-	if (CPU_STAT_VM86) {
-		load_ss(sreg_sel[CPU_SS_INDEX].selector,
-		    &sreg_sel[CPU_SS_INDEX].desc, new_cpl);
-		LOAD_SEGREG1(CPU_ES_INDEX, sreg_sel[CPU_ES_INDEX].selector,
-		    TS_EXCEPTION);
-		LOAD_SEGREG1(CPU_DS_INDEX, sreg_sel[CPU_DS_INDEX].selector,
-		    TS_EXCEPTION);
-		LOAD_SEGREG1(CPU_FS_INDEX, sreg_sel[CPU_FS_INDEX].selector,
-		    TS_EXCEPTION);
-		LOAD_SEGREG1(CPU_GS_INDEX, sreg_sel[CPU_GS_INDEX].selector,
-		    TS_EXCEPTION);
-		load_cs(sreg_sel[CPU_CS_INDEX].selector,
-		    &sreg_sel[CPU_CS_INDEX].desc, new_cpl);
-	} else {
-		/* load SS */
-
-		/* SS must be writable data segment */
-		if (SEG_IS_SYSTEM(&sreg_sel[CPU_SS_INDEX].desc)
-		    || SEG_IS_CODE(&sreg_sel[CPU_SS_INDEX].desc)
-		    || !SEG_IS_WRITABLE_DATA(&sreg_sel[CPU_SS_INDEX].desc)) {
-			EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_SS_INDEX].idx);
-		}
-
-		/* check privilege level */
-		if ((sreg_sel[CPU_SS_INDEX].desc.dpl != new_cpl)
-		    || (sreg_sel[CPU_SS_INDEX].desc.dpl != new_cpl)) {
-			EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_SS_INDEX].idx);
-		}
-
-		/* stack segment is not present */
-		rv = selector_is_not_present(&sreg_sel[CPU_SS_INDEX]);
+	if (!CPU_STAT_VM86) {
+		/* load CS */
+		rv = parse_selector(&cs_sel, sreg[CPU_CS_INDEX]);
 		if (rv < 0) {
-			EXCEPTION(SS_EXCEPTION, sreg_sel[CPU_SS_INDEX].idx);
+			VERBOSE(("task_switch: load CS failure (sel = 0x%04x, rv = %d)", sreg[CPU_CS_INDEX], rv));
+			EXCEPTION(TS_EXCEPTION, cs_sel.idx);
 		}
 
-		/* Now loading SS register */
-		load_ss(sreg_sel[CPU_SS_INDEX].selector,
-		    &sreg_sel[CPU_SS_INDEX].desc, new_cpl);
-
-		/* load ES, DS, FS, GS segment register */
-		LOAD_SEGREG1(CPU_ES_INDEX, sreg_sel[CPU_ES_INDEX].selector,
-		    TS_EXCEPTION);
-		LOAD_SEGREG1(CPU_DS_INDEX, sreg_sel[CPU_DS_INDEX].selector,
-		    TS_EXCEPTION);
-		LOAD_SEGREG1(CPU_FS_INDEX, sreg_sel[CPU_FS_INDEX].selector,
-		    TS_EXCEPTION);
-		LOAD_SEGREG1(CPU_GS_INDEX, sreg_sel[CPU_GS_INDEX].selector,
-		    TS_EXCEPTION);
-
-		/* load CS */
-		/* CS must be code segment */
-		if (SEG_IS_SYSTEM(&sreg_sel[CPU_CS_INDEX].desc)
-		    || SEG_IS_DATA(&sreg_sel[CPU_CS_INDEX].desc)) {
-			EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_CS_INDEX].idx);
+		if (SEG_IS_SYSTEM(&cs_sel.desc) || SEG_IS_DATA(&cs_sel.desc)) {
+			EXCEPTION(TS_EXCEPTION, cs_sel.idx);
 		}
 
 		/* check privilege level */
-		if (!SEG_IS_CONFORMING_CODE(&sreg_sel[CPU_CS_INDEX].desc)) {
+		if (!SEG_IS_CONFORMING_CODE(&cs_sel.desc)) {
 			/* non-confirming code segment */
-			if (sreg_sel[CPU_CS_INDEX].desc.dpl != new_cpl) {
-				EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_CS_INDEX].idx);
+			if (cs_sel.desc.dpl != cs_sel.rpl) {
+				EXCEPTION(TS_EXCEPTION, cs_sel.idx);
 			}
 		} else {
 			/* conforming code segment */
-			if (sreg_sel[CPU_CS_INDEX].desc.dpl > new_cpl) {
-				EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_CS_INDEX].idx);
+			if (cs_sel.desc.dpl > cs_sel.rpl) {
+				EXCEPTION(TS_EXCEPTION, cs_sel.idx);
 			}
 		}
 
 		/* code segment is not present */
-		rv = selector_is_not_present(&sreg_sel[CPU_CS_INDEX]);
+		rv = selector_is_not_present(&cs_sel);
 		if (rv < 0) {
-			EXCEPTION(NP_EXCEPTION, sreg_sel[CPU_CS_INDEX].idx);
+			EXCEPTION(NP_EXCEPTION, cs_sel.idx);
 		}
 
+		/* load SS */
+		rv = parse_selector(&ss_sel, sreg[CPU_SS_INDEX]);
+		if (rv < 0) {
+			VERBOSE(("task_switch: load SS failure (sel = 0x%04x, rv = %d)", sreg[CPU_SS_INDEX], rv));
+			EXCEPTION(TS_EXCEPTION, ss_sel.idx);
+		}
+
+		/* SS must be writable data segment */
+		if (SEG_IS_SYSTEM(&ss_sel.desc)
+		    || SEG_IS_CODE(&ss_sel.desc)
+		    || !SEG_IS_WRITABLE_DATA(&ss_sel.desc)) {
+			EXCEPTION(TS_EXCEPTION, ss_sel.idx);
+		}
+
+		/* check privilege level */
+		if ((ss_sel.desc.dpl != cs_sel.rpl)
+		    || (ss_sel.desc.dpl != ss_sel.rpl)) {
+			EXCEPTION(TS_EXCEPTION, ss_sel.idx);
+		}
+
+		/* stack segment is not present */
+		rv = selector_is_not_present(&ss_sel);
+		if (rv < 0) {
+			EXCEPTION(SS_EXCEPTION, ss_sel.idx);
+		}
+
+		/* Now loading SS register */
+		load_ss(ss_sel.selector, &ss_sel.desc, cs_sel.rpl);
+
+		/* load ES, DS, FS, GS segment register */
+		LOAD_SEGREG1(CPU_ES_INDEX, sreg[CPU_ES_INDEX], TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_DS_INDEX, sreg[CPU_DS_INDEX], TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_FS_INDEX, sreg[CPU_FS_INDEX], TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_GS_INDEX, sreg[CPU_GS_INDEX], TS_EXCEPTION);
+
 		/* Now loading CS register */
-		load_cs(sreg_sel[CPU_CS_INDEX].selector,
-		    &sreg_sel[CPU_CS_INDEX].desc, new_cpl);
+		load_cs(cs_sel.selector, &cs_sel.desc, cs_sel.rpl);
 	}
 
 	VERBOSE(("task_switch: done."));