| version 1.26, 2011/12/17 02:08:04 | version 1.37, 2012/02/07 08:14:07 | 
| Line 32 | Line 32 | 
 | #define TSS_32_SIZE     104 | #define TSS_32_SIZE     104 | 
 | #define TSS_32_LIMIT    (TSS_32_SIZE - 1) | #define TSS_32_LIMIT    (TSS_32_SIZE - 1) | 
 |  |  | 
| static void | static void CPUCALL | 
 | set_task_busy(UINT16 selector) | set_task_busy(UINT16 selector) | 
 | { | { | 
 | UINT32 addr; | UINT32 addr; | 
| Line 44  set_task_busy(UINT16 selector) | Line 44  set_task_busy(UINT16 selector) | 
 | h |= CPU_TSS_H_BUSY; | h |= CPU_TSS_H_BUSY; | 
 | cpu_kmemorywrite_d(addr + 4, h); | cpu_kmemorywrite_d(addr + 4, h); | 
 | } else { | } else { | 
| ia32_panic("set_task_busy: already busy(%04x:%08x)",selector,h); | ia32_panic("set_task_busy: already busy(%04x:%08x)", | 
|  | selector, h); | 
 | } | } | 
 | } | } | 
 |  |  | 
| static void | static void CPUCALL | 
 | set_task_free(UINT16 selector) | set_task_free(UINT16 selector) | 
 | { | { | 
 | UINT32 addr; | UINT32 addr; | 
| Line 60  set_task_free(UINT16 selector) | Line 61  set_task_free(UINT16 selector) | 
 | h &= ~CPU_TSS_H_BUSY; | h &= ~CPU_TSS_H_BUSY; | 
 | cpu_kmemorywrite_d(addr + 4, h); | cpu_kmemorywrite_d(addr + 4, h); | 
 | } else { | } else { | 
| ia32_panic("set_task_free: already free(%04x:%08x)",selector,h); | ia32_panic("set_task_free: already free(%04x:%08x)", | 
|  | selector, h); | 
 | } | } | 
 | } | } | 
 |  |  | 
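
Both helpers above toggle the same bit: in a 32-bit TSS descriptor the busy flag is bit 1 of the type field (type 1001b = available, 1011b = busy), which lives in the second dword of the descriptor, hence the read-modify-write at addr + 4. A minimal sketch of the shared pattern, assuming cpu_kmemoryread_d is the dword counterpart of the accessors used above and that addr points at the descriptor in the GDT:

    /* sketch: flip the busy bit of a TSS descriptor at addr (hypothetical helper) */
    static void CPUCALL
    set_task_busy_state(UINT32 addr, BOOL busy)
    {
        UINT32 h = cpu_kmemoryread_d(addr + 4);    /* descriptor high dword */

        if (busy)
            h |= CPU_TSS_H_BUSY;                   /* type 1001b -> 1011b */
        else
            h &= ~CPU_TSS_H_BUSY;                  /* type 1011b -> 1001b */
        cpu_kmemorywrite_d(addr + 4, h);
    }
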
| void | void CPUCALL | 
 | load_tr(UINT16 selector) | load_tr(UINT16 selector) | 
 | { | { | 
 | selector_t task_sel; | selector_t task_sel; | 
 | int rv; | int rv; | 
 | #if defined(IA32_SUPPORT_DEBUG_REGISTER) |  | 
 | int i; |  | 
 | #endif |  | 
 | UINT16 iobase; | UINT16 iobase; | 
 |  |  | 
 | rv = parse_selector(&task_sel, selector); | rv = parse_selector(&task_sel, selector); | 
| Line 118  load_tr(UINT16 selector) | Line 117  load_tr(UINT16 selector) | 
 | /* I/O deny bitmap */ | /* I/O deny bitmap */ | 
 | CPU_STAT_IOLIMIT = 0; | CPU_STAT_IOLIMIT = 0; | 
 | if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) { | if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) { | 
| if (iobase != 0 && iobase < CPU_TR_DESC.u.seg.limit) { | if (iobase < CPU_TR_LIMIT) { | 
| CPU_STAT_IOLIMIT = (UINT16)(CPU_TR_DESC.u.seg.limit - iobase); | CPU_STAT_IOLIMIT = (UINT16)(CPU_TR_LIMIT - iobase); | 
| CPU_STAT_IOADDR = CPU_TR_DESC.u.seg.segbase + iobase; | CPU_STAT_IOADDR = CPU_TR_BASE + iobase; | 
|  | VERBOSE(("load_tr: enable ioport control: iobase=0x%04x, base=0x%08x, limit=0x%08x", iobase, CPU_STAT_IOADDR, CPU_STAT_IOLIMIT)); | 
 | } | } | 
 | } | } | 
|  | if (CPU_STAT_IOLIMIT == 0) { | 
| #if defined(IA32_SUPPORT_DEBUG_REGISTER) | VERBOSE(("load_tr: disable ioport control.")); | 
| /* clear local break point flags */ |  | 
| CPU_DR7 &= ~(CPU_DR7_L(0)|CPU_DR7_L(1)|CPU_DR7_L(2)|CPU_DR7_L(3)|CPU_DR7_LE); |  | 
| CPU_STAT_BP = 0; |  | 
| for (i = 0; i < CPU_DEBUG_REG_INDEX_NUM; i++) { |  | 
| if (CPU_DR7 & CPU_DR7_G(i)) { |  | 
| CPU_STAT_BP |= (1 << i); |  | 
| } |  | 
 | } | } | 
 | #endif |  | 
 | } | } | 
 |  |  | 
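
load_tr records the I/O permission bitmap window as a flat address/length pair (CPU_STAT_IOADDR, CPU_STAT_IOLIMIT). As the comment says, this is a deny bitmap: a set bit blocks the port. A sketch of how an IN/OUT check could consult it; cpu_kmemoryread as a byte-wide accessor is an assumed name, and a real check must also read across the byte boundary for word/dword accesses:

    /* sketch: test one port against the I/O deny bitmap */
    static BOOL CPUCALL
    ioport_denied(UINT16 port)
    {
        UINT8 map;

        if (CPU_STAT_IOLIMIT == 0 || (UINT32)(port >> 3) >= CPU_STAT_IOLIMIT)
            return TRUE;                    /* no bitmap, or port beyond it: deny */
        map = cpu_kmemoryread(CPU_STAT_IOADDR + (port >> 3));
        return (map >> (port & 7)) & 1;     /* set bit = access denied */
    }
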
| void | void CPUCALL | 
 | get_stack_pointer_from_tss(UINT pl, UINT16 *new_ss, UINT32 *new_esp) | get_stack_pointer_from_tss(UINT pl, UINT16 *new_ss, UINT32 *new_esp) | 
 | { | { | 
 | UINT32 tss_stack_addr; | UINT32 tss_stack_addr; | 
 |  |  | 
 | VERBOSE(("get_stack_pointer_from_tss: pl = %d", pl)); | VERBOSE(("get_stack_pointer_from_tss: pl = %d", pl)); | 
| VERBOSE(("CPU_TR type = %d, base = 0x%08x, limit = 0x%08x", CPU_TR_DESC.type, CPU_TR_BASE, CPU_TR_LIMIT)); | VERBOSE(("get_stack_pointer_from_tss: CPU_TR type = %d, base = 0x%08x, limit = 0x%08x", CPU_TR_DESC.type, CPU_TR_BASE, CPU_TR_LIMIT)); | 
 |  |  | 
 | __ASSERT(pl < 3); | __ASSERT(pl < 3); | 
 |  |  | 
| Line 165  get_stack_pointer_from_tss(UINT pl, UINT | Line 157  get_stack_pointer_from_tss(UINT pl, UINT | 
 | } else { | } else { | 
 | ia32_panic("get_stack_pointer_from_tss: task register is invalid (%d)\n", CPU_TR_DESC.type); | ia32_panic("get_stack_pointer_from_tss: task register is invalid (%d)\n", CPU_TR_DESC.type); | 
 | } | } | 
| VERBOSE(("new stack pointer = %04x:%08x", *new_ss, *new_esp)); | VERBOSE(("get_stack_pointer_from_tss: new stack pointer = %04x:%08x", | 
|  | *new_ss, *new_esp)); | 
 | } | } | 
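
The slot arithmetic behind this helper follows the architected TSS layout: a 32-bit TSS keeps the ring-pl stack at offset 4 + pl * 8 (an ESP dword followed by an SS word), a 16-bit TSS at offset 2 + pl * 4 (SP word, then SS word). A sketch of the 32-bit branch, assuming cpu_kmemoryread_d alongside the word accessor used below:

    /* sketch: fetch the ring-pl stack from a 32-bit TSS */
    tss_stack_addr = CPU_TR_BASE + 4 + pl * 8;
    *new_esp = cpu_kmemoryread_d(tss_stack_addr);       /* ESPpl */
    *new_ss  = cpu_kmemoryread_w(tss_stack_addr + 4);   /* SSpl  */
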
 |  |  | 
 | UINT16 | UINT16 | 
| Line 186  get_backlink_selector_from_tss(void) | Line 179  get_backlink_selector_from_tss(void) | 
 | } | } | 
 |  |  | 
 | backlink = cpu_kmemoryread_w(CPU_TR_BASE); | backlink = cpu_kmemoryread_w(CPU_TR_BASE); | 
| VERBOSE(("get_backlink_selector_from_tss: backlink selector = 0x%04x", backlink)); | VERBOSE(("get_backlink_selector_from_tss: backlink selector = 0x%04x", | 
|  | backlink)); | 
 | return backlink; | return backlink; | 
 | } | } | 
 |  |  | 
| void | void CPUCALL | 
 | task_switch(selector_t *task_sel, task_switch_type_t type) | task_switch(selector_t *task_sel, task_switch_type_t type) | 
 | { | { | 
 | UINT32 regs[CPU_REG_NUM]; | UINT32 regs[CPU_REG_NUM]; | 
| Line 240  task_switch(selector_t *task_sel, task_s | Line 234  task_switch(selector_t *task_sel, task_s | 
 | cur_paddr = laddr_to_paddr(cur_base, CPU_PAGE_WRITE_DATA|CPU_MODE_SUPERVISER); | cur_paddr = laddr_to_paddr(cur_base, CPU_PAGE_WRITE_DATA|CPU_MODE_SUPERVISER); | 
 | task_base = task_sel->desc.u.seg.segbase; | task_base = task_sel->desc.u.seg.segbase; | 
 | task_paddr = laddr_to_paddr(task_base, CPU_PAGE_WRITE_DATA|CPU_MODE_SUPERVISER); | task_paddr = laddr_to_paddr(task_base, CPU_PAGE_WRITE_DATA|CPU_MODE_SUPERVISER); | 
| VERBOSE(("task_switch: current task (%04x) = 0x%08x:%08x", CPU_TR, cur_base, CPU_TR_LIMIT)); | VERBOSE(("task_switch: current task (%04x) = 0x%08x:%08x (p0x%08x)", | 
| VERBOSE(("task_switch: new task (%04x) = 0x%08x:%08x", task_sel->selector, task_base, task_sel->desc.u.seg.limit)); | CPU_TR, cur_base, CPU_TR_LIMIT, cur_paddr)); | 
|  | VERBOSE(("task_switch: new task (%04x) = 0x%08x:%08x (p0x%08x)", | 
|  | task_sel->selector, task_base, task_sel->desc.u.seg.limit, | 
|  | task_paddr)); | 
 | VERBOSE(("task_switch: %dbit task switch", task16 ? 16 : 32)); | VERBOSE(("task_switch: %dbit task switch", task16 ? 16 : 32)); | 
 |  |  | 
 | #if defined(MORE_DEBUG) | #if defined(MORE_DEBUG) | 
| { | VERBOSE(("task_switch: new task")); | 
| UINT32 v; | for (i = 0; i < task_sel->desc.u.seg.limit; i += 4) { | 
|  | VERBOSE(("task_switch: 0x%08x: %08x", task_base + i, | 
| VERBOSE(("task_switch: new task")); | cpu_memoryread_d(task_paddr + i))); | 
| for (i = 0; i < task_sel->desc.u.seg.limit; i += 4) { |  | 
| v = cpu_memoryread_d(task_paddr + i); |  | 
| VERBOSE(("task_switch: 0x%08x: %08x", task_base + i,v)); |  | 
| } |  | 
 | } | } | 
 | #endif | #endif | 
 |  |  | 
| Line 342  task_switch(selector_t *task_sel, task_s | Line 335  task_switch(selector_t *task_sel, task_s | 
 | break; | break; | 
 |  |  | 
 | default: | default: | 
| ia32_panic("task_switch(): task switch type is invalid"); | ia32_panic("task_switch: task switch type is invalid"); | 
 | break; | break; | 
 | } | } | 
 |  |  | 
| Line 351  task_switch(selector_t *task_sel, task_s | Line 344  task_switch(selector_t *task_sel, task_s | 
 | cpu_memorywrite_d(cur_paddr + 32, CPU_EIP); | cpu_memorywrite_d(cur_paddr + 32, CPU_EIP); | 
 | cpu_memorywrite_d(cur_paddr + 36, old_flags); | cpu_memorywrite_d(cur_paddr + 36, old_flags); | 
 | for (i = 0; i < CPU_REG_NUM; i++) { | for (i = 0; i < CPU_REG_NUM; i++) { | 
| cpu_memorywrite_d(cur_paddr + 40 + i * 4, CPU_REGS_DWORD(i)); | cpu_memorywrite_d(cur_paddr + 40 + i * 4, | 
|  | CPU_REGS_DWORD(i)); | 
 | } | } | 
 | for (i = 0; i < CPU_SEGREG_NUM; i++) { | for (i = 0; i < CPU_SEGREG_NUM; i++) { | 
| cpu_memorywrite_w(cur_paddr + 72 + i * 4, CPU_REGS_SREG(i)); | cpu_memorywrite_w(cur_paddr + 72 + i * 4, | 
|  | CPU_REGS_SREG(i)); | 
 | } | } | 
 | } else { | } else { | 
 | cpu_memorywrite_w(cur_paddr + 14, CPU_IP); | cpu_memorywrite_w(cur_paddr + 14, CPU_IP); | 
 | cpu_memorywrite_w(cur_paddr + 16, (UINT16)old_flags); | cpu_memorywrite_w(cur_paddr + 16, (UINT16)old_flags); | 
 | for (i = 0; i < CPU_REG_NUM; i++) { | for (i = 0; i < CPU_REG_NUM; i++) { | 
| cpu_memorywrite_w(cur_paddr + 18 + i * 2, CPU_REGS_WORD(i)); | cpu_memorywrite_w(cur_paddr + 18 + i * 2, | 
|  | CPU_REGS_WORD(i)); | 
 | } | } | 
 | for (i = 0; i < CPU_SEGREG286_NUM; i++) { | for (i = 0; i < CPU_SEGREG286_NUM; i++) { | 
| cpu_memorywrite_w(cur_paddr + 34 + i * 2, CPU_REGS_SREG(i)); | cpu_memorywrite_w(cur_paddr + 34 + i * 2, | 
|  | CPU_REGS_SREG(i)); | 
 | } | } | 
 | } | } | 
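
The offsets used by this save loop (EIP at 32, EFLAGS at 36, GPRs from 40, segment selectors from 72 on a dword stride) trace out the dynamic region of the 32-bit TSS. Laid out as a struct for reference; the field names are illustrative, not from this source:

    /* sketch: 32-bit TSS image, 104 bytes (= TSS_32_SIZE) */
    struct tss32 {
        UINT32 backlink;    /*   0: previous task link (low word)  */
        UINT32 stack[3][2]; /*   4: ESP0,SS0 .. ESP2,SS2           */
        UINT32 cr3;         /*  28                                 */
        UINT32 eip;         /*  32: written above                  */
        UINT32 eflags;      /*  36: written above                  */
        UINT32 regs[8];     /*  40: EAX..EDI, written above        */
        UINT32 sregs[6];    /*  72: ES,CS,SS,DS,FS,GS (low words)  */
        UINT32 ldtr;        /*  96                                 */
        UINT16 t;           /* 100: debug trap flag                */
        UINT16 iobase;      /* 102: I/O bitmap offset              */
    };
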
 |  |  | 
| Line 381  task_switch(selector_t *task_sel, task_s | Line 378  task_switch(selector_t *task_sel, task_s | 
 | break; | break; | 
 |  |  | 
 | default: | default: | 
| ia32_panic("task_switch(): task switch type is invalid"); | ia32_panic("task_switch: task switch type is invalid"); | 
 | break; | break; | 
 | } | } | 
 |  |  | 
 | #if defined(MORE_DEBUG) | #if defined(MORE_DEBUG) | 
| { | VERBOSE(("task_switch: current task")); | 
| UINT32 v; | for (i = 0; i < CPU_TR_LIMIT; i += 4) { | 
|  | VERBOSE(("task_switch: 0x%08x: %08x", cur_base + i, | 
| VERBOSE(("task_switch: current task")); | cpu_memoryread_d(cur_paddr + i))); | 
| for (i = 0; i < CPU_TR_LIMIT; i += 4) { |  | 
| v = cpu_memoryread_d(cur_paddr + i); |  | 
| VERBOSE(("task_switch: 0x%08x: %08x", cur_base + i, v)); |  | 
| } |  | 
 | } | } | 
 | #endif | #endif | 
 |  |  | 
| Line 423  task_switch(selector_t *task_sel, task_s | Line 416  task_switch(selector_t *task_sel, task_s | 
 | break; | break; | 
 |  |  | 
 | default: | default: | 
| ia32_panic("task_switch(): task switch type is invalid"); | ia32_panic("task_switch: task switch type is invalid"); | 
 | break; | break; | 
 | } | } | 
 |  |  | 
| Line 436  task_switch(selector_t *task_sel, task_s | Line 429  task_switch(selector_t *task_sel, task_s | 
 | CPU_CR0 |= CPU_CR0_TS; | CPU_CR0 |= CPU_CR0_TS; | 
 |  |  | 
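
Setting CR0.TS here mirrors the hardware rule that every task switch arms lazy FPU context switching: while TS is set, the next x87 instruction raises #NM so the OS can save and restore coprocessor state only when a task actually uses it. A sketch of the gate an FPU opcode would apply; NM_EXCEPTION and CPU_CR0_EM are assumed names in the style of the constants above:

    /* sketch: device-not-available check before executing an ESC opcode */
    if (CPU_CR0 & (CPU_CR0_TS | CPU_CR0_EM)) {
        EXCEPTION(NM_EXCEPTION, 0);     /* #NM: let the OS swap FPU state */
    }
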
 | /* | /* | 
| * load task state (CR3, EFLAG, EIP, GPR, segreg, LDTR) | * load task state (CR3, EIP, GPR, segregs, LDTR, EFLAGS) | 
 | */ | */ | 
 |  |  | 
 | /* set new CR3 */ | /* set new CR3 */ | 
| Line 444  task_switch(selector_t *task_sel, task_s | Line 437  task_switch(selector_t *task_sel, task_s | 
 | set_cr3(cr3); | set_cr3(cr3); | 
 | } | } | 
 |  |  | 
| /* set new EIP, GPR */ | /* set new EIP, GPR, segregs */ | 
 | CPU_EIP = eip; | CPU_EIP = eip; | 
 | for (i = 0; i < CPU_REG_NUM; i++) { | for (i = 0; i < CPU_REG_NUM; i++) { | 
 | CPU_REGS_DWORD(i) = regs[i]; | CPU_REGS_DWORD(i) = regs[i]; | 
 | } | } | 
 | for (i = 0; i < CPU_SEGREG_NUM; i++) { | for (i = 0; i < CPU_SEGREG_NUM; i++) { | 
 | segdesc_init(i, sreg[i], &CPU_STAT_SREG(i)); | segdesc_init(i, sreg[i], &CPU_STAT_SREG(i)); | 
 |  | /* invalidate segreg descriptor */ | 
 |  | CPU_STAT_SREG(i).valid = 0; | 
 | } | } | 
 |  |  | 
 |  | CPU_CLEAR_PREV_ESP(); | 
 |  |  | 
 | /* load new LDTR */ | /* load new LDTR */ | 
 |  | CPU_LDTR_DESC.valid = 0; | 
 | load_ldtr(ldtr, TS_EXCEPTION); | load_ldtr(ldtr, TS_EXCEPTION); | 
 |  |  | 
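
Invalidating the cached descriptors (CPU_STAT_SREG(i).valid = 0, CPU_LDTR_DESC.valid = 0) before the reloads is a fault-ordering guard: if load_ldtr or a later segment load raises TS_EXCEPTION, execution must not continue on the previous task's cached segment state. The new column's pattern, compactly:

    /* sketch: drop caches first, then re-validate via the loaders */
    for (i = 0; i < CPU_SEGREG_NUM; i++) {
        segdesc_init(i, sreg[i], &CPU_STAT_SREG(i));
        CPU_STAT_SREG(i).valid = 0;     /* force a fresh descriptor check */
    }
    CPU_LDTR_DESC.valid = 0;
    load_ldtr(ldtr, TS_EXCEPTION);      /* may fault; caches already safe */
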
 | /* I/O deny bitmap */ | /* I/O deny bitmap */ | 
| Line 462  task_switch(selector_t *task_sel, task_s | Line 460  task_switch(selector_t *task_sel, task_s | 
 | CPU_STAT_IOLIMIT = (UINT16)(CPU_TR_DESC.u.seg.limit - iobase); | CPU_STAT_IOLIMIT = (UINT16)(CPU_TR_DESC.u.seg.limit - iobase); | 
 | CPU_STAT_IOADDR = task_base + iobase; | CPU_STAT_IOADDR = task_base + iobase; | 
 | } | } | 
| VERBOSE(("task_switch: ioaddr = %08x, limit = %08x", CPU_STAT_IOADDR, CPU_STAT_IOLIMIT)); | VERBOSE(("task_switch: ioaddr = %08x, limit = %08x", CPU_STAT_IOADDR, | 
|  | CPU_STAT_IOLIMIT)); | 
| #if defined(IA32_SUPPORT_DEBUG_REGISTER) |  | 
| /* check resume flag */ |  | 
| if (CPU_EFLAG & RF_FLAG) { |  | 
| CPU_STAT_BP_EVENT |= CPU_STAT_BP_EVENT_RF; |  | 
| } |  | 
|  |  | 
| /* clear local break point flags */ |  | 
| CPU_DR7 &= ~(CPU_DR7_L(0)|CPU_DR7_L(1)|CPU_DR7_L(2)|CPU_DR7_L(3)|CPU_DR7_LE); |  | 
| CPU_STAT_BP = 0; |  | 
| for (i = 0; i < CPU_DEBUG_REG_INDEX_NUM; i++) { |  | 
| if (CPU_DR7 & CPU_DR7_G(i)) { |  | 
| CPU_STAT_BP |= (1 << i); |  | 
| } |  | 
| } |  | 
| #endif |  | 
 |  |  | 
 | /* set new EFLAGS */ | /* set new EFLAGS */ | 
 | set_eflags(new_flags, I_FLAG|IOPL_FLAG|RF_FLAG|VM_FLAG|VIF_FLAG|VIP_FLAG); | set_eflags(new_flags, I_FLAG|IOPL_FLAG|RF_FLAG|VM_FLAG|VIF_FLAG|VIP_FLAG); | 
 |  |  | 
 | /* set new segment register */ | /* set new segment register */ | 
 | if (!CPU_STAT_VM86) { | if (!CPU_STAT_VM86) { | 
 | /* clear segment descriptor cache */ |  | 
 | for (i = 0; i < CPU_SEGREG_NUM; i++) { |  | 
 | segdesc_clear(&CPU_STAT_SREG(i)); |  | 
 | } |  | 
 |  |  | 
 | /* load CS */ | /* load CS */ | 
 | rv = parse_selector(&cs_sel, sreg[CPU_CS_INDEX]); | rv = parse_selector(&cs_sel, sreg[CPU_CS_INDEX]); | 
 | if (rv < 0) { | if (rv < 0) { | 
| Line 547  task_switch(selector_t *task_sel, task_s | Line 525  task_switch(selector_t *task_sel, task_s | 
 | EXCEPTION(SS_EXCEPTION, ss_sel.idx); | EXCEPTION(SS_EXCEPTION, ss_sel.idx); | 
 | } | } | 
 |  |  | 
| /* Now loading CS/SS register */ | /* Now loading SS register */ | 
| load_cs(cs_sel.selector, &cs_sel.desc, cs_sel.rpl); |  | 
 | load_ss(ss_sel.selector, &ss_sel.desc, cs_sel.rpl); | load_ss(ss_sel.selector, &ss_sel.desc, cs_sel.rpl); | 
 |  |  | 
 | /* load ES, DS, FS, GS segment register */ | /* load ES, DS, FS, GS segment register */ | 
| for (i = 0; i < CPU_SEGREG_NUM; i++) { | LOAD_SEGREG1(CPU_ES_INDEX, sreg[CPU_ES_INDEX], TS_EXCEPTION); | 
| if (i != CPU_CS_INDEX || i != CPU_SS_INDEX) { | LOAD_SEGREG1(CPU_DS_INDEX, sreg[CPU_DS_INDEX], TS_EXCEPTION); | 
| LOAD_SEGREG1(i, sreg[i], TS_EXCEPTION); | LOAD_SEGREG1(CPU_FS_INDEX, sreg[CPU_FS_INDEX], TS_EXCEPTION); | 
| } | LOAD_SEGREG1(CPU_GS_INDEX, sreg[CPU_GS_INDEX], TS_EXCEPTION); | 
| } |  | 
| } |  | 
 |  |  | 
| /* out of range */ | /* Now loading CS register */ | 
| if (CPU_EIP > CPU_STAT_CS_LIMIT) { | load_cs(cs_sel.selector, &cs_sel.desc, cs_sel.rpl); | 
| VERBOSE(("task_switch: new_ip is out of range. new_ip = %08x, limit = %08x", CPU_EIP, CPU_STAT_CS_LIMIT)); |  | 
| EXCEPTION(GP_EXCEPTION, 0); |  | 
 | } | } | 
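
Two things happen in this hunk. First, the removed loop guard `if (i != CPU_CS_INDEX || i != CPU_SS_INDEX)` is an always-true condition (no i can equal both indices at once), so the old code pushed CS and SS through LOAD_SEGREG1 a second time; the test it presumably intended was:

    /* the guard the removed loop presumably meant */
    if (i != CPU_CS_INDEX && i != CPU_SS_INDEX) {
        LOAD_SEGREG1(i, sreg[i], TS_EXCEPTION);
    }

Second, the new column replaces the loop with explicit ES/DS/FS/GS loads and defers load_cs to the very end, so the CPL changes only after every other selector has been validated.
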
 |  |  | 
 | VERBOSE(("task_switch: done.")); | VERBOSE(("task_switch: done.")); |