--- np2/i386c/ia32/task.c	2004/02/05 16:43:44	1.9
+++ np2/i386c/ia32/task.c	2011/12/20 09:03:28	1.27
@@ -1,5 +1,3 @@
-/* $Id: task.c,v 1.9 2004/02/05 16:43:44 monaka Exp $ */
-
 /*
  * Copyright (c) 2003 NONAKA Kimihiro
  * All rights reserved.
@@ -12,8 +10,6 @@
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- *    derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
@@ -31,35 +27,77 @@
 #include "cpu.h"
 #include "ia32.mcr"
 
+#define	TSS_16_SIZE	44
+#define	TSS_16_LIMIT	(TSS_16_SIZE - 1)
+#define	TSS_32_SIZE	104
+#define	TSS_32_LIMIT	(TSS_32_SIZE - 1)
+
+static void
+set_task_busy(UINT16 selector)
+{
+	UINT32 addr;
+	UINT32 h;
+
+	addr = CPU_GDTR_BASE + (selector & CPU_SEGMENT_SELECTOR_INDEX_MASK);
+	h = cpu_kmemoryread_d(addr + 4);
+	if (!(h & CPU_TSS_H_BUSY)) {
+		h |= CPU_TSS_H_BUSY;
+		cpu_kmemorywrite_d(addr + 4, h);
+	} else {
+		ia32_panic("set_task_busy: already busy (%04x:%08x)", selector, h);
+	}
+}
+
+static void
+set_task_free(UINT16 selector)
+{
+	UINT32 addr;
+	UINT32 h;
+
+	addr = CPU_GDTR_BASE + (selector & CPU_SEGMENT_SELECTOR_INDEX_MASK);
+	h = cpu_kmemoryread_d(addr + 4);
+	if (h & CPU_TSS_H_BUSY) {
+		h &= ~CPU_TSS_H_BUSY;
+		cpu_kmemorywrite_d(addr + 4, h);
+	} else {
+		ia32_panic("set_task_free: already free (%04x:%08x)", selector, h);
+	}
+}
 
 void
-load_tr(WORD selector)
+load_tr(UINT16 selector)
 {
 	selector_t task_sel;
 	int rv;
+#if defined(IA32_SUPPORT_DEBUG_REGISTER)
+	int i;
+#endif
+	UINT16 iobase;
 
 	rv = parse_selector(&task_sel, selector);
-	if (rv < 0 || task_sel.ldt || task_sel.desc.s) {
+	if (rv < 0 || task_sel.ldt || !SEG_IS_SYSTEM(&task_sel.desc)) {
 		EXCEPTION(GP_EXCEPTION, task_sel.idx);
 	}
 
 	/* check descriptor type & stack room size */
 	switch (task_sel.desc.type) {
 	case CPU_SYSDESC_TYPE_TSS_16:
-		if (task_sel.desc.u.seg.limit < 0x2b) {
+		if (task_sel.desc.u.seg.limit < TSS_16_LIMIT) {
 			EXCEPTION(TS_EXCEPTION, task_sel.idx);
 		}
+		iobase = 0;
 		break;
 
 	case CPU_SYSDESC_TYPE_TSS_32:
-		if (task_sel.desc.u.seg.limit < 0x67) {
+		if (task_sel.desc.u.seg.limit < TSS_32_LIMIT) {
 			EXCEPTION(TS_EXCEPTION, task_sel.idx);
 		}
+		iobase = cpu_kmemoryread_w(task_sel.desc.u.seg.segbase + 102);
 		break;
 
 	default:
 		EXCEPTION(GP_EXCEPTION, task_sel.idx);
-		break;
+		return;
 	}
 
 	/* not present */
@@ -72,180 +110,202 @@ load_tr(WORD selector)
 	tr_dump(task_sel.selector, task_sel.desc.u.seg.segbase, task_sel.desc.u.seg.limit);
 #endif
 
-	CPU_SET_TASK_BUSY(task_sel.selector, &task_sel.desc);
+	set_task_busy(task_sel.selector);
 	CPU_TR = task_sel.selector;
 	CPU_TR_DESC = task_sel.desc;
+	CPU_TR_DESC.type |= CPU_SYSDESC_TYPE_TSS_BUSY_IND;
+
+	/* I/O deny bitmap */
+	CPU_STAT_IOLIMIT = 0;
+	if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) {
+		if (iobase != 0 && iobase < CPU_TR_DESC.u.seg.limit) {
+			CPU_STAT_IOLIMIT = (UINT16)(CPU_TR_DESC.u.seg.limit - iobase);
+			CPU_STAT_IOADDR = CPU_TR_DESC.u.seg.segbase + iobase;
+		}
+	}
+
+#if defined(IA32_SUPPORT_DEBUG_REGISTER)
+	/* clear local break point flags */
+	CPU_DR7 &= ~(CPU_DR7_L(0)|CPU_DR7_L(1)|CPU_DR7_L(2)|CPU_DR7_L(3)|CPU_DR7_LE);
+	CPU_STAT_BP = 0;
+	for (i = 0; i < CPU_DEBUG_REG_INDEX_NUM; i++) {
+		if (CPU_DR7 & CPU_DR7_G(i)) {
+			CPU_STAT_BP |= (1 << i);
+		}
+	}
+#endif
 }
 
 void
-get_stack_from_tss(DWORD pl, WORD *new_ss, DWORD *new_esp)
+get_stack_pointer_from_tss(UINT pl, UINT16 *new_ss, UINT32 *new_esp)
 {
-	DWORD tss_stack_addr;
+	UINT32 tss_stack_addr;
+
+	VERBOSE(("get_stack_pointer_from_tss: pl = %d", pl));
+	VERBOSE(("CPU_TR type = %d, base = 0x%08x, limit = 0x%08x", CPU_TR_DESC.type, CPU_TR_BASE, CPU_TR_LIMIT));
 
 	__ASSERT(pl < 3);
 
 	if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) {
 		tss_stack_addr = pl * 8 + 4;
-		if (tss_stack_addr + 7 > CPU_TR_DESC.u.seg.limit) {
+		if (tss_stack_addr + 7 > CPU_TR_LIMIT) {
 			EXCEPTION(TS_EXCEPTION, CPU_TR & ~3);
 		}
-		tss_stack_addr += CPU_TR_DESC.u.seg.segbase;
+		tss_stack_addr += CPU_TR_BASE;
 		*new_esp = cpu_kmemoryread_d(tss_stack_addr);
 		*new_ss = cpu_kmemoryread_w(tss_stack_addr + 4);
 	} else if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_16) {
 		tss_stack_addr = pl * 4 + 2;
-		if (tss_stack_addr + 3 > CPU_TR_DESC.u.seg.limit) {
+		if (tss_stack_addr + 3 > CPU_TR_LIMIT) {
 			EXCEPTION(TS_EXCEPTION, CPU_TR & ~3);
 		}
-		tss_stack_addr += CPU_TR_DESC.u.seg.segbase;
+		tss_stack_addr += CPU_TR_BASE;
 		*new_esp = cpu_kmemoryread_w(tss_stack_addr);
 		*new_ss = cpu_kmemoryread_w(tss_stack_addr + 2);
 	} else {
-		ia32_panic("get_stack_from_tss: task register is invalid (%d)\n", CPU_TR_DESC.type);
+		ia32_panic("get_stack_pointer_from_tss: task register is invalid (%d)\n", CPU_TR_DESC.type);
 	}
-
-	VERBOSE(("get_stack_from_tss: pl = %d, new_esp = 0x%08x, new_ss = 0x%04x", pl, *new_esp, *new_ss));
+	VERBOSE(("new stack pointer = %04x:%08x", *new_ss, *new_esp));
 }
 
-WORD
-get_link_selector_from_tss()
+UINT16
+get_backlink_selector_from_tss(void)
 {
-	WORD backlink;
+	UINT16 backlink;
 
 	if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) {
-		if (4 > CPU_TR_DESC.u.seg.limit) {
+		if (4 > CPU_TR_LIMIT) {
 			EXCEPTION(TS_EXCEPTION, CPU_TR & ~3);
 		}
 	} else if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_16) {
-		if (2 > CPU_TR_DESC.u.seg.limit) {
+		if (2 > CPU_TR_LIMIT) {
 			EXCEPTION(TS_EXCEPTION, CPU_TR & ~3);
 		}
 	} else {
-		ia32_panic("get_link_selector_from_tss: task register is invalid (%d)\n", CPU_TR_DESC.type);
+		ia32_panic("get_backlink_selector_from_tss: task register has invalid type (%d)\n", CPU_TR_DESC.type);
 	}
 
-	backlink = cpu_kmemoryread_w(CPU_TR_DESC.u.seg.segbase);
-	VERBOSE(("get_link_selector_from_tss: backlink selector = 0x%04x", backlink));
+	backlink = cpu_kmemoryread_w(CPU_TR_BASE);
+	VERBOSE(("get_backlink_selector_from_tss: backlink selector = 0x%04x", backlink));
 	return backlink;
 }
 
 void
-task_switch(selector_t* task_sel, int type)
+task_switch(selector_t *task_sel, task_switch_type_t type)
 {
-	DWORD regs[CPU_REG_NUM];
-	DWORD eip;
-	DWORD new_flags;
-	DWORD mask;
-	DWORD cr3 = 0;
-	WORD sreg[CPU_SEGREG_NUM];
-	WORD ldtr;
-	WORD t, iobase;
+	UINT32 regs[CPU_REG_NUM];
+	UINT32 eip;
+	UINT32 new_flags;
+	UINT32 cr3 = 0;
+	UINT16 sreg[CPU_SEGREG_NUM];
+	UINT16 ldtr;
+	UINT16 iobase;
+	UINT16 t;
+	int new_cpl;
 
-	selector_t cs_sel;
+	selector_t sreg_sel[CPU_SEGREG_NUM];
+	selector_t ldtr_sel;
 	int rv;
 
-	DWORD cur_base;		/* current task state */
-	DWORD task_base;	/* new task state */
-	DWORD old_flags = REAL_EFLAGREG;
+	UINT32 cur_base, cur_paddr;	/* current task state */
+	UINT32 task_base, task_paddr;	/* new task state */
+	UINT32 old_flags = REAL_EFLAGREG;
 	BOOL task16;
-	DWORD nsreg;
-	DWORD i;
+	UINT i;
 
 	VERBOSE(("task_switch: start"));
 
-	/* limit check */
 	switch (task_sel->desc.type) {
 	case CPU_SYSDESC_TYPE_TSS_32:
 	case CPU_SYSDESC_TYPE_TSS_BUSY_32:
-		if (task_sel->desc.u.seg.limit < 0x67) {
+		if (task_sel->desc.u.seg.limit < TSS_32_LIMIT) {
 			EXCEPTION(TS_EXCEPTION, task_sel->idx);
 		}
-		task16 = FALSE;
-		nsreg = CPU_SEGREG_NUM;
+		task16 = 0;
 		break;
 
	case CPU_SYSDESC_TYPE_TSS_16:
 	case CPU_SYSDESC_TYPE_TSS_BUSY_16:
-		if (task_sel->desc.u.seg.limit < 0x2b) {
+		if (task_sel->desc.u.seg.limit < TSS_16_LIMIT) {
 			EXCEPTION(TS_EXCEPTION, task_sel->idx);
 		}
-		task16 = TRUE;
-		nsreg = CPU_SEGREG286_NUM;
+		task16 = 1;
 		break;
 
 	default:
 		ia32_panic("task_switch: descriptor type is invalid.");
-		task16 = FALSE;		/* compiler happy */
-		nsreg = CPU_SEGREG_NUM;	/* compiler happy */
+		task16 = 0;		/* compiler happy */
 		break;
 	}
 
-	cur_base = CPU_TR_DESC.u.seg.segbase;
+	cur_base = CPU_TR_BASE;
+	cur_paddr = laddr_to_paddr(cur_base, CPU_PAGE_WRITE_DATA|CPU_MODE_SUPERVISER);
 	task_base = task_sel->desc.u.seg.segbase;
-	VERBOSE(("task_switch: cur task (%04x) = 0x%08x:%08x", CPU_TR, cur_base, CPU_TR_DESC.u.seg.limit));
+	task_paddr = laddr_to_paddr(task_base, CPU_PAGE_WRITE_DATA|CPU_MODE_SUPERVISER);
+	VERBOSE(("task_switch: current task (%04x) = 0x%08x:%08x", CPU_TR, cur_base, CPU_TR_LIMIT));
 	VERBOSE(("task_switch: new task (%04x) = 0x%08x:%08x", task_sel->selector, task_base, task_sel->desc.u.seg.limit));
 	VERBOSE(("task_switch: %dbit task switch", task16 ? 16 : 32));
 
 #if defined(MORE_DEBUG)
 	{
-		DWORD v;
+		UINT32 v;
 
 		VERBOSE(("task_switch: new task"));
 		for (i = 0; i < task_sel->desc.u.seg.limit; i += 4) {
-			v = cpu_kmemoryread_d(task_base + i);
+			v = cpu_memoryread_d(task_paddr + i);
 			VERBOSE(("task_switch: 0x%08x: %08x", task_base + i, v));
 		}
 	}
 #endif
 
-	if (CPU_STAT_PAGING) {
-		/* task state paging check */
-		paging_check(cur_base, CPU_TR_DESC.u.seg.limit, CPU_PAGE_WRITE_DATA, CPU_MODE_SUPERVISER);
-		paging_check(task_base, task_sel->desc.u.seg.limit, CPU_PAGE_WRITE_DATA, CPU_MODE_SUPERVISER);
-	}
-
 	/* load task state */
 	memset(sreg, 0, sizeof(sreg));
 	if (!task16) {
 		if (CPU_STAT_PAGING) {
-			cr3 = cpu_kmemoryread_d(task_base + 28);
+			cr3 = cpu_memoryread_d(task_paddr + 28);
 		}
-		eip = cpu_kmemoryread_d(task_base + 32);
-		new_flags = cpu_kmemoryread_d(task_base + 36);
+		eip = cpu_memoryread_d(task_paddr + 32);
+		new_flags = cpu_memoryread_d(task_paddr + 36);
 		for (i = 0; i < CPU_REG_NUM; i++) {
-			regs[i] = cpu_kmemoryread_d(task_base + 40 + i * 4);
+			regs[i] = cpu_memoryread_d(task_paddr + 40 + i * 4);
+		}
+		for (i = 0; i < CPU_SEGREG_NUM; i++) {
+			sreg[i] = cpu_memoryread_w(task_paddr + 72 + i * 4);
 		}
-		for (i = 0; i < nsreg; i++) {
-			sreg[i] = cpu_kmemoryread_w(task_base + 72 + i * 4);
+		ldtr = cpu_memoryread_w(task_paddr + 96);
+		t = cpu_memoryread_w(task_paddr + 100);
+		if (t & 1) {
+			CPU_STAT_BP_EVENT |= CPU_STAT_BP_EVENT_TASK;
 		}
-		ldtr = cpu_kmemoryread_w(task_base + 96);
-		t = cpu_kmemoryread_w(task_base + 100);
-		t &= 1;
-		iobase = cpu_kmemoryread_w(task_base + 102);
+		iobase = cpu_memoryread_w(task_paddr + 102);
 	} else {
-		eip = cpu_kmemoryread_w(task_base + 14);
-		new_flags = cpu_kmemoryread_w(task_base + 16);
+		eip = cpu_memoryread_w(task_paddr + 14);
+		new_flags = cpu_memoryread_w(task_paddr + 16);
 		for (i = 0; i < CPU_REG_NUM; i++) {
-			regs[i] = cpu_kmemoryread_w(task_base + 18 + i * 2);
+			regs[i] = cpu_memoryread_w(task_paddr + 18 + i * 2);
 		}
-		for (i = 0; i < nsreg; i++) {
-			sreg[i] = cpu_kmemoryread_w(task_base + 34 + i * 2);
+		for (i = 0; i < CPU_SEGREG286_NUM; i++) {
+			sreg[i] = cpu_memoryread_w(task_paddr + 34 + i * 2);
 		}
-		ldtr = cpu_kmemoryread_w(task_base + 42);
-		t = 0;
+		ldtr = cpu_memoryread_w(task_paddr + 42);
 		iobase = 0;
+		t = 0;
 	}
 
 #if defined(DEBUG)
 	VERBOSE(("task_switch: current task"));
+	if (!task16) {
+		VERBOSE(("task_switch: CR3 = 0x%08x", CPU_CR3));
+	}
 	VERBOSE(("task_switch: eip = 0x%08x", CPU_EIP));
 	VERBOSE(("task_switch: eflags = 0x%08x", old_flags));
 	for (i = 0; i < CPU_REG_NUM; i++) {
 		VERBOSE(("task_switch: regs[%d] = 0x%08x", i, CPU_REGS_DWORD(i)));
 	}
-	for (i = 0; i < nsreg; i++) {
+	for (i = 0; i < CPU_SEGREG_NUM; i++) {
 		VERBOSE(("task_switch: sreg[%d] = 0x%04x", i, CPU_REGS_SREG(i)));
 	}
+	VERBOSE(("task_switch: ldtr = 0x%04x", CPU_LDTR));
 
 	VERBOSE(("task_switch: new task"));
 	if (!task16) {
@@ -256,7 +316,7 @@ task_switch(selector_t* task_sel, int ty
 	for (i = 0; i < CPU_REG_NUM; i++) {
 		VERBOSE(("task_switch: regs[%d] = 0x%08x", i, regs[i]));
 	}
-	for (i = 0; i < nsreg; i++) {
+	for (i = 0; i < CPU_SEGREG_NUM; i++) {
 		VERBOSE(("task_switch: sreg[%d] = 0x%04x", i, sreg[i]));
 	}
 	VERBOSE(("task_switch: ldtr = 0x%04x", ldtr));
@@ -275,7 +335,7 @@ task_switch(selector_t* task_sel, int ty
 		/*FALLTHROUGH*/
 	case TASK_SWITCH_JMP:
 		/* clear busy flags in current task */
-		CPU_SET_TASK_FREE(CPU_TR, &CPU_TR_DESC);
+		set_task_free(CPU_TR);
 		break;
 
 	case TASK_SWITCH_CALL:
@@ -288,44 +348,33 @@ task_switch(selector_t* task_sel, int ty
 		break;
 	}
 
-	/* save this task state in this task state segment */
+	/* store current task state in current TSS */
 	if (!task16) {
-		cpu_kmemorywrite_d(cur_base + 32, CPU_EIP);
-		cpu_kmemorywrite_d(cur_base + 36, old_flags);
+		cpu_memorywrite_d(cur_paddr + 32, CPU_EIP);
+		cpu_memorywrite_d(cur_paddr + 36, old_flags);
 		for (i = 0; i < CPU_REG_NUM; i++) {
-			cpu_kmemorywrite_d(cur_base + 40 + i * 4, CPU_REGS_DWORD(i));
+			cpu_memorywrite_d(cur_paddr + 40 + i * 4, CPU_REGS_DWORD(i));
 		}
-		for (i = 0; i < nsreg; i++) {
-			cpu_kmemorywrite_w(cur_base + 72 + i * 4, CPU_REGS_SREG(i));
+		for (i = 0; i < CPU_SEGREG_NUM; i++) {
+			cpu_memorywrite_w(cur_paddr + 72 + i * 4, CPU_REGS_SREG(i));
 		}
 	} else {
-		cpu_kmemorywrite_w(cur_base + 14, CPU_IP);
-		cpu_kmemorywrite_w(cur_base + 16, (WORD)old_flags);
+		cpu_memorywrite_w(cur_paddr + 14, CPU_IP);
+		cpu_memorywrite_w(cur_paddr + 16, (UINT16)old_flags);
 		for (i = 0; i < CPU_REG_NUM; i++) {
-			cpu_kmemorywrite_w(cur_base + 18 + i * 2, CPU_REGS_WORD(i));
+			cpu_memorywrite_w(cur_paddr + 18 + i * 2, CPU_REGS_WORD(i));
 		}
-		for (i = 0; i < nsreg; i++) {
-			cpu_kmemorywrite_w(cur_base + 34 + i * 2, CPU_REGS_SREG(i));
+		for (i = 0; i < CPU_SEGREG286_NUM; i++) {
			cpu_memorywrite_w(cur_paddr + 34 + i * 2, CPU_REGS_SREG(i));
 		}
 	}
 
-#if defined(MORE_DEBUG)
-	{
-		DWORD v;
-
-		VERBOSE(("task_switch: current task"));
-		for (i = 0; i < CPU_TR_DESC.u.seg.limit; i += 4) {
-			v = cpu_kmemoryread_d(cur_base + i);
-			VERBOSE(("task_switch: 0x%08x: %08x", cur_base + i, v));
-		}
-	}
-#endif
-
 	/* set back link selector */
 	switch (type) {
 	case TASK_SWITCH_CALL:
 	case TASK_SWITCH_INTR:
 		/* set back link selector */
-		cpu_kmemorywrite_w(task_base, CPU_TR);
+		cpu_memorywrite_w(task_paddr, CPU_TR);
 		break;
 
 	case TASK_SWITCH_IRET:
@@ -338,6 +387,18 @@ task_switch(selector_t* task_sel, int ty
 		break;
 	}
 
+#if defined(MORE_DEBUG)
+	{
+		UINT32 v;
+
+		VERBOSE(("task_switch: current task"));
+		for (i = 0; i < CPU_TR_LIMIT; i += 4) {
+			v = cpu_memoryread_d(cur_paddr + i);
+			VERBOSE(("task_switch: 0x%08x: %08x", cur_base + i, v));
+		}
+	}
+#endif
+
 	/* Now task switching! */
 
 	/* if CALL, INTR, set EFLAGS image NT_FLAG */
@@ -349,13 +410,13 @@ task_switch(selector_t* task_sel, int ty
 		new_flags |= NT_FLAG;
 		/*FALLTHROUGH*/
 	case TASK_SWITCH_JMP:
-		CPU_SET_TASK_BUSY(task_sel->selector, &task_sel->desc);
+		set_task_busy(task_sel->selector);
 		break;
-	
+
 	case TASK_SWITCH_IRET:
 		/* check busy flag is active */
-		if (task_sel->desc.valid) {
-			DWORD h;
+		if (SEG_IS_VALID(&task_sel->desc)) {
+			UINT32 h;
 			h = cpu_kmemoryread_d(task_sel->addr + 4);
 			if ((h & CPU_TSS_H_BUSY) == 0) {
 				ia32_panic("task_switch: new task is not busy");
@@ -368,103 +429,156 @@ task_switch(selector_t* task_sel, int ty
 		break;
 	}
 
-	/* set CR0 image CPU_CR0_TS */
-	CPU_CR0 |= CPU_CR0_TS;
-
 	/* load task selector to CPU_TR */
 	CPU_TR = task_sel->selector;
 	CPU_TR_DESC = task_sel->desc;
+	CPU_TR_DESC.type |= CPU_SYSDESC_TYPE_TSS_BUSY_IND;
 
-	/* load task state (CR3, EFLAG, EIP, GPR, segreg, LDTR) */
+	/* set CR0 image CPU_CR0_TS */
+	CPU_CR0 |= CPU_CR0_TS;
 
-	/* set new CR3 */
-	if (!task16 && CPU_STAT_PAGING) {
-		set_CR3(cr3);
-	}
+	/*
+	 * load task state (EIP, GPR, EFLAG, segreg, CR3, LDTR)
+	 */
 
 	/* set new EIP, GPR */
-	CPU_PREV_EIP = CPU_EIP = eip;
+	CPU_EIP = eip;
 	for (i = 0; i < CPU_REG_NUM; i++) {
 		CPU_REGS_DWORD(i) = regs[i];
 	}
+
+	CPU_CLEAR_PREV_ESP();
+
+	/* set new EFLAGS */
+	set_eflags(new_flags, I_FLAG|IOPL_FLAG|RF_FLAG|VM_FLAG|VIF_FLAG|VIP_FLAG);
+
+	/* check new segregs, ldtr */
 	for (i = 0; i < CPU_SEGREG_NUM; i++) {
-		CPU_REGS_SREG(i) = sreg[i];
-		CPU_STAT_SREG_INIT(i);
+		rv = parse_selector(&sreg_sel[i], sreg[i]);
+		if (rv < 0) {
+			VERBOSE(("task_switch: selector parse failure: index=%d (sel = 0x%04x, rv = %d)", i, sreg[i], rv));
+			EXCEPTION(TS_EXCEPTION, sreg_sel[i].idx);
+		}
+	}
+	rv = parse_selector(&ldtr_sel, ldtr);
+	if (rv < 0) {
+		VERBOSE(("task_switch: LDTR selector parse failure (sel = 0x%04x, rv = %d)", ldtr, rv));
+		EXCEPTION(TS_EXCEPTION, ldtr_sel.idx);
 	}
 
-	/* set new EFLAGS */
-	mask = I_FLAG|IOPL_FLAG|RF_FLAG|VM_FLAG|VIF_FLAG|VIP_FLAG;
-	set_eflags(new_flags, mask);
+	/* set new CR3 */
+	if (!task16 && CPU_STAT_PAGING) {
+		set_cr3(cr3);
+	}
 
 	/* load new LDTR */
 	load_ldtr(ldtr, TS_EXCEPTION);
 
+	/* I/O deny bitmap */
+	CPU_STAT_IOLIMIT = 0;
+	if (!task16 && iobase != 0 && iobase < CPU_TR_DESC.u.seg.limit) {
+		CPU_STAT_IOLIMIT = (UINT16)(CPU_TR_DESC.u.seg.limit - iobase);
+		CPU_STAT_IOADDR = task_base + iobase;
+	}
+	VERBOSE(("task_switch: ioaddr = %08x, limit = %08x", CPU_STAT_IOADDR, CPU_STAT_IOLIMIT));
+
+#if defined(IA32_SUPPORT_DEBUG_REGISTER)
+	/* check resume flag */
+	if (CPU_EFLAG & RF_FLAG) {
+		CPU_STAT_BP_EVENT |= CPU_STAT_BP_EVENT_RF;
+	}
+
+	/* clear local break point flags */
+	CPU_DR7 &= ~(CPU_DR7_L(0)|CPU_DR7_L(1)|CPU_DR7_L(2)|CPU_DR7_L(3)|CPU_DR7_LE);
+	CPU_STAT_BP = 0;
+	for (i = 0; i < CPU_DEBUG_REG_INDEX_NUM; i++) {
+		if (CPU_DR7 & CPU_DR7_G(i)) {
+			CPU_STAT_BP |= (1 << i);
+		}
+	}
+#endif
+
 	/* set new segment register */
-	if (!CPU_STAT_VM86) {
-		/* clear segment descriptor cache */
-		for (i = 0; i < CPU_SEGREG_NUM; i++) {
-			CPU_STAT_SREG_CLEAR(i);
+	new_cpl = sreg_sel[CPU_CS_INDEX].rpl;
+	if (CPU_STAT_VM86) {
+		load_ss(sreg_sel[CPU_SS_INDEX].selector,
+		    &sreg_sel[CPU_SS_INDEX].desc, new_cpl);
+		LOAD_SEGREG1(CPU_ES_INDEX, sreg_sel[CPU_ES_INDEX].selector,
+		    TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_DS_INDEX, sreg_sel[CPU_DS_INDEX].selector,
+		    TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_FS_INDEX, sreg_sel[CPU_FS_INDEX].selector,
+		    TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_GS_INDEX, sreg_sel[CPU_GS_INDEX].selector,
+		    TS_EXCEPTION);
+		load_cs(sreg_sel[CPU_CS_INDEX].selector,
+		    &sreg_sel[CPU_CS_INDEX].desc, new_cpl);
+	} else {
+		/* load SS */
+
+		/* SS must be writable data segment */
+		if (SEG_IS_SYSTEM(&sreg_sel[CPU_SS_INDEX].desc)
+		 || SEG_IS_CODE(&sreg_sel[CPU_SS_INDEX].desc)
+		 || !SEG_IS_WRITABLE_DATA(&sreg_sel[CPU_SS_INDEX].desc)) {
+			EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_SS_INDEX].idx);
 		}
 
-		/* load CS */
-		rv = parse_selector(&cs_sel, sreg[CPU_CS_INDEX]);
+		/* check privilege level */
+		if ((sreg_sel[CPU_SS_INDEX].desc.dpl != new_cpl)
+		 || (sreg_sel[CPU_SS_INDEX].rpl != new_cpl)) {
+			EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_SS_INDEX].idx);
+		}
+
+		/* stack segment is not present */
+		rv = selector_is_not_present(&sreg_sel[CPU_SS_INDEX]);
 		if (rv < 0) {
-			VERBOSE(("task_switch: load CS failure (sel = 0x%04x, rv = %d)", sreg[CPU_CS_INDEX], rv));
-			EXCEPTION(TS_EXCEPTION, cs_sel.idx);
+			EXCEPTION(SS_EXCEPTION, sreg_sel[CPU_SS_INDEX].idx);
 		}
 
-		/* CS register must be code segment */
-		if (!cs_sel.desc.s || !cs_sel.desc.u.seg.c) {
-			EXCEPTION(TS_EXCEPTION, cs_sel.idx);
+		/* Now loading SS register */
+		load_ss(sreg_sel[CPU_SS_INDEX].selector,
+		    &sreg_sel[CPU_SS_INDEX].desc, new_cpl);
+
+		/* load ES, DS, FS, GS segment register */
+		LOAD_SEGREG1(CPU_ES_INDEX, sreg_sel[CPU_ES_INDEX].selector,
+		    TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_DS_INDEX, sreg_sel[CPU_DS_INDEX].selector,
+		    TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_FS_INDEX, sreg_sel[CPU_FS_INDEX].selector,
+		    TS_EXCEPTION);
+		LOAD_SEGREG1(CPU_GS_INDEX, sreg_sel[CPU_GS_INDEX].selector,
+		    TS_EXCEPTION);
+
+		/* load CS */
+
+		/* CS must be code segment */
+		if (SEG_IS_SYSTEM(&sreg_sel[CPU_CS_INDEX].desc)
+		 || SEG_IS_DATA(&sreg_sel[CPU_CS_INDEX].desc)) {
+			EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_CS_INDEX].idx);
 		}
 
 		/* check privilege level */
-		if (!cs_sel.desc.u.seg.ec) {
+		if (!SEG_IS_CONFORMING_CODE(&sreg_sel[CPU_CS_INDEX].desc)) {
 			/* non-conforming code segment */
-			if (cs_sel.desc.dpl != cs_sel.rpl) {
-				EXCEPTION(TS_EXCEPTION, cs_sel.idx);
+			if (sreg_sel[CPU_CS_INDEX].desc.dpl != new_cpl) {
+				EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_CS_INDEX].idx);
 			}
 		} else {
-			/* confirming code segment */
-			if (cs_sel.desc.dpl < cs_sel.rpl) {
-				EXCEPTION(TS_EXCEPTION, cs_sel.idx);
+			/* conforming code segment */
+			if (sreg_sel[CPU_CS_INDEX].desc.dpl > new_cpl) {
+				EXCEPTION(TS_EXCEPTION, sreg_sel[CPU_CS_INDEX].idx);
 			}
 		}
 
 		/* code segment is not present */
-		rv = selector_is_not_present(&cs_sel);
+		rv = selector_is_not_present(&sreg_sel[CPU_CS_INDEX]);
 		if (rv < 0) {
-			EXCEPTION(NP_EXCEPTION, cs_sel.idx);
+			EXCEPTION(NP_EXCEPTION, sreg_sel[CPU_CS_INDEX].idx);
 		}
 
 		/* Now loading CS register */
-		load_cs(cs_sel.selector, &cs_sel.desc, cs_sel.desc.dpl);
-
-		/* load ES, SS, DS, FS, GS segment register */
-		for (i = 0; i < CPU_SEGREG_NUM; i++) {
-			if (i != CPU_CS_INDEX) {
-				load_segreg(i, sreg[i], TS_EXCEPTION);
-			}
-		}
-	}
-
-	/* I/O deny bitmap */
-	if (!task16) {
-		if (task_sel->desc.u.seg.limit > iobase) {
-			CPU_STAT_IOLIMIT = task_sel->desc.u.seg.limit - iobase;
-			CPU_STAT_IOADDR = task_sel->desc.u.seg.segbase + iobase;
-		} else {
-			CPU_STAT_IOLIMIT = 0;
-		}
-	} else {
-		CPU_STAT_IOLIMIT = 0;
-	}
-	VERBOSE(("task_switch: ioaddr = %08x, limit = %08x", CPU_STAT_IOADDR, CPU_STAT_IOLIMIT));
-
-	/* out of range */
-	if (CPU_EIP > CPU_STAT_CS_LIMIT) {
-		VERBOSE(("task_switch: new_ip is out of range. new_ip = %08x, limit = %08x", CPU_EIP, CPU_STAT_CS_LIMIT));
-		EXCEPTION(GP_EXCEPTION, 0);
+		load_cs(sreg_sel[CPU_CS_INDEX].selector,
+		    &sreg_sel[CPU_CS_INDEX].desc, new_cpl);
 	}
 
 	VERBOSE(("task_switch: done."));
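
Note on the I/O bitmap caching introduced above: both load_tr() and task_switch() now record where a 32-bit TSS's I/O permission bitmap starts (CPU_STAT_IOADDR) and how many bytes of it fit under the segment limit (CPU_STAT_IOLIMIT), so the port-access check does not have to re-walk the TSS on every IN/OUT. A minimal standalone sketch of how such a cached bitmap would be consulted is below; the function, the read callback, and all names are hypothetical illustrations, not np2's actual IN/OUT path.

/*
 * Sketch: IA-32 I/O permission bitmap lookup against a cached
 * ioaddr/iolimit pair (mirroring CPU_STAT_IOADDR/CPU_STAT_IOLIMIT).
 * Returns nonzero if the access must fault: every bit covering the
 * port range must be clear, and the whole range must lie inside the
 * iolimit bytes of bitmap at ioaddr.
 */
#include <stdint.h>

typedef uint8_t (*kmem_read8_fn)(uint32_t addr);	/* hypothetical memory-read hook */

static int
io_access_denied(uint32_t ioaddr, uint16_t iolimit, kmem_read8_fn read8,
    uint16_t port, unsigned int len)
{
	uint32_t bit, off;
	unsigned int i;

	if (iolimit == 0)
		return 1;		/* no bitmap cached: deny */
	for (i = 0; i < len; i++) {
		bit = (uint32_t)port + i;
		off = bit >> 3;		/* byte offset of this port's bit */
		if (off >= iolimit)
			return 1;	/* range runs past the bitmap: deny */
		if (read8(ioaddr + off) & (1 << (bit & 7)))
			return 1;	/* permission bit set: deny */
	}
	return 0;
}

On real hardware this lookup only happens when CPL > IOPL in protected mode, and always in virtual-8086 mode; when CPL <= IOPL the access is allowed without consulting the bitmap, and a denied access raises #GP.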