--- version 1.23, 2008/03/22 04:03:08
+++ version 1.26, 2011/12/17 02:08:04
@@ v1.23 line 1 / v1.26 line 1 @@
-/*      $Id$    */
 
 /*
  * Copyright (c) 2003 NONAKA Kimihiro
  * All rights reserved.
@@ v1.23 line 29 / v1.26 line 27 @@
 #include "cpu.h"
 #include "ia32.mcr"
 
-#define TSS_SIZE_16     44
-#define TSS_SIZE_32     108
+#define TSS_16_SIZE     44
+#define TSS_16_LIMIT    (TSS_16_SIZE - 1)
+#define TSS_32_SIZE     104
+#define TSS_32_LIMIT    (TSS_32_SIZE - 1)
 
 static void
-set_task_busy(UINT16 selector, descriptor_t *sdp)
+set_task_busy(UINT16 selector)
 {
 	UINT32 addr;
 	UINT32 h;
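
The renamed macros separate a TSS's byte size from the smallest descriptor limit that can hold it: a 32-bit TSS is 104 bytes, a 16-bit TSS 44 bytes, and since a segment limit is the last valid byte offset, the minimum acceptable limit is size - 1. The old pair (TSS_SIZE_32 of 108 compared directly against the limit) effectively required a limit of at least 108 where the architecture asks only for 103. The standalone sketch below (simplified types, not the emulator's code) spells out the corrected check.

    /* Standalone illustration of the corrected limit check; the helper and
     * main() are illustrative only. */
    #include <stdint.h>
    #include <stdio.h>

    #define TSS_16_SIZE   44                  /* bytes in a 16-bit TSS */
    #define TSS_16_LIMIT  (TSS_16_SIZE - 1)   /* highest valid byte offset */
    #define TSS_32_SIZE   104                 /* bytes in a 32-bit TSS */
    #define TSS_32_LIMIT  (TSS_32_SIZE - 1)

    /* Nonzero when a descriptor limit cannot hold a full 32-bit TSS. */
    static int tss32_too_small(uint32_t seg_limit)
    {
        return seg_limit < TSS_32_LIMIT;      /* the limit is inclusive, so 103 suffices */
    }

    int main(void)
    {
        /* prints "0 1": a 104-byte segment (limit 103) passes, 103 bytes do not */
        printf("%d %d\n", tss32_too_small(103), tss32_too_small(102));
        return 0;
    }
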
@@ v1.23 line 41 / v1.26 line 41 @@ set_task_busy(UINT16 selector, descripto
 	addr = CPU_GDTR_BASE + (selector & CPU_SEGMENT_SELECTOR_INDEX_MASK);
 	h = cpu_kmemoryread_d(addr + 4);
 	if (!(h & CPU_TSS_H_BUSY)) {
-		sdp->type |= CPU_SYSDESC_TYPE_TSS_BUSY_IND;
 		h |= CPU_TSS_H_BUSY;
 		cpu_kmemorywrite_d(addr + 4, h);
 	} else {
@@ v1.23 line 50 / v1.26 line 49 @@ set_task_busy(UINT16 selector, descripto
 }
 
 static void
-set_task_free(UINT16 selector, descriptor_t *sdp)
+set_task_free(UINT16 selector)
 {
 	UINT32 addr;
 	UINT32 h;
@@ v1.23 line 58 / v1.26 line 57 @@ set_task_free(UINT16 selector, descripto
 	addr = CPU_GDTR_BASE + (selector & CPU_SEGMENT_SELECTOR_INDEX_MASK);
 	h = cpu_kmemoryread_d(addr + 4);
 	if (h & CPU_TSS_H_BUSY) {
-		sdp->type &= ~CPU_SYSDESC_TYPE_TSS_BUSY_IND;
 		h &= ~CPU_TSS_H_BUSY;
 		cpu_kmemorywrite_d(addr + 4, h);
 	} else {
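
With the descriptor-cache pointer gone, set_task_busy() and set_task_free() now touch only the TSS descriptor in the GDT, flipping the busy bit in its high doubleword; the cached copy in CPU_TR_DESC is adjusted separately by the callers. A standalone sketch of that flip follows; the 0x200 mask is an assumption standing in for CPU_TSS_H_BUSY (bit 9 of the high doubleword, which turns type 9, an available 32-bit TSS, into type 11, a busy one).

    /* Toy model of the busy-bit flip applied to a TSS descriptor's high
     * doubleword; masks and helpers are illustrative, not the emulator's. */
    #include <assert.h>
    #include <stdint.h>

    #define TSS_H_BUSY  0x00000200u   /* assumed busy bit within the type field */

    static uint32_t mark_busy(uint32_t high)
    {
        assert(!(high & TSS_H_BUSY)); /* loading an already-busy TSS is an error */
        return high | TSS_H_BUSY;
    }

    static uint32_t mark_free(uint32_t high)
    {
        assert(high & TSS_H_BUSY);    /* freeing a non-busy TSS indicates a bug */
        return high & ~TSS_H_BUSY;
    }

    int main(void)
    {
        uint32_t h = 0x00008900u;                 /* present, type 9: available 32-bit TSS */
        h = mark_busy(h);                         /* type becomes 0xB: busy 32-bit TSS */
        assert((h & 0x00000F00u) == 0x00000B00u);
        h = mark_free(h);
        assert((h & 0x00000F00u) == 0x00000900u);
        return 0;
    }
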
@@ v1.23 line 84 / v1.26 line 82 @@ load_tr(UINT16 selector)
 	/* check descriptor type & stack room size */
 	switch (task_sel.desc.type) {
 	case CPU_SYSDESC_TYPE_TSS_16:
-		if (task_sel.desc.u.seg.limit < TSS_SIZE_16) {
+		if (task_sel.desc.u.seg.limit < TSS_16_LIMIT) {
 			EXCEPTION(TS_EXCEPTION, task_sel.idx);
 		}
 		iobase = 0;
 		break;
 
 	case CPU_SYSDESC_TYPE_TSS_32:
-		if (task_sel.desc.u.seg.limit < TSS_SIZE_32) {
+		if (task_sel.desc.u.seg.limit < TSS_32_LIMIT) {
 			EXCEPTION(TS_EXCEPTION, task_sel.idx);
 		}
 		iobase = cpu_kmemoryread_w(task_sel.desc.u.seg.segbase + 102);
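
load_tr() reads the I/O map base from offset 102 of a 32-bit TSS; the 16-bit (80286) TSS format has no I/O permission bitmap, so iobase stays 0 in that case. The layout below is a standalone reconstruction of the 104-byte 32-bit TSS showing where that offset and the TSS_32_SIZE figure come from; field names are illustrative.

    /* Standalone layout check for the 32-bit TSS (field names illustrative). */
    #include <stddef.h>
    #include <stdint.h>

    struct tss32 {
        uint32_t backlink;                          /* previous task link */
        uint32_t esp0, ss0, esp1, ss1, esp2, ss2;   /* privilege-level stacks */
        uint32_t cr3, eip, eflags;
        uint32_t eax, ecx, edx, ebx, esp, ebp, esi, edi;
        uint32_t es, cs, ss, ds, fs, gs;            /* selectors, upper halves reserved */
        uint32_t ldtr;
        uint16_t trap;                              /* bit 0: debug trap on task switch */
        uint16_t iobase;                            /* I/O permission bitmap offset */
    };

    /* 25 doublewords + 2 words = 104 bytes; the I/O map base sits at offset 102. */
    _Static_assert(offsetof(struct tss32, iobase) == 102, "iobase offset");
    _Static_assert(sizeof(struct tss32) == 104, "32-bit TSS size");

    int main(void) { return 0; }
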
@@ v1.23 line 112 / v1.26 line 110 @@ load_tr(UINT16 selector)
 	tr_dump(task_sel.selector, task_sel.desc.u.seg.segbase, task_sel.desc.u.seg.limit);
 #endif
 
-	set_task_busy(task_sel.selector, &task_sel.desc);
+	set_task_busy(task_sel.selector);
 	CPU_TR = task_sel.selector;
 	CPU_TR_DESC = task_sel.desc;
+	CPU_TR_DESC.type |= CPU_SYSDESC_TYPE_TSS_BUSY_IND;
 
 	/* I/O deny bitmap */
 	CPU_STAT_IOLIMIT = 0;
-	if (task_sel.desc.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) {
-		if (iobase != 0 && iobase < task_sel.desc.u.seg.limit) {
-			CPU_STAT_IOLIMIT = (UINT16)(task_sel.desc.u.seg.limit - iobase);
-			CPU_STAT_IOADDR = task_sel.desc.u.seg.segbase + iobase;
+	if (CPU_TR_DESC.type == CPU_SYSDESC_TYPE_TSS_BUSY_32) {
+		if (iobase != 0 && iobase < CPU_TR_DESC.u.seg.limit) {
+			CPU_STAT_IOLIMIT = (UINT16)(CPU_TR_DESC.u.seg.limit - iobase);
+			CPU_STAT_IOADDR = CPU_TR_DESC.u.seg.segbase + iobase;
 		}
 	}
 
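
Caching the descriptor with the busy indicator already OR'd into its type keeps CPU_TR_DESC consistent with the in-memory descriptor that set_task_busy() just modified, which is also why the I/O-bitmap test right after it compares against CPU_SYSDESC_TYPE_TSS_BUSY_32. The window arithmetic itself reduces to the standalone sketch below (structure and names are illustrative, not the emulator's).

    /* Standalone sketch of the I/O-bitmap window arithmetic used above. */
    #include <stdint.h>
    #include <stdio.h>

    struct io_window {
        uint32_t addr;      /* linear address of the first bitmap byte */
        uint16_t limit;     /* bytes of bitmap inside the TSS segment, 0 = no bitmap */
    };

    /* Derive the usable I/O-permission-bitmap window from the TSS base/limit
     * and the I/O map base read at offset 102.  A base of 0, or one at or
     * beyond the segment limit, leaves the window empty. */
    static struct io_window io_bitmap_window(uint32_t segbase, uint32_t seglimit,
                                             uint16_t iobase)
    {
        struct io_window w = { 0, 0 };
        if (iobase != 0 && iobase < seglimit) {
            w.limit = (uint16_t)(seglimit - iobase);
            w.addr = segbase + iobase;
        }
        return w;
    }

    int main(void)
    {
        struct io_window w = io_bitmap_window(0x1000, 0x20ff, 0x68);
        printf("addr=%08x limit=%04x\n", w.addr, w.limit);  /* addr=00001068 limit=2097 */
        return 0;
    }
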
@@ v1.23 line 217 / v1.26 line 216 @@ task_switch(selector_t *task_sel, task_s
 	switch (task_sel->desc.type) {
 	case CPU_SYSDESC_TYPE_TSS_32:
 	case CPU_SYSDESC_TYPE_TSS_BUSY_32:
-		if (task_sel->desc.u.seg.limit < TSS_SIZE_32) {
+		if (task_sel->desc.u.seg.limit < TSS_32_LIMIT) {
 			EXCEPTION(TS_EXCEPTION, task_sel->idx);
 		}
 		task16 = 0;
@@ v1.23 line 225 / v1.26 line 224 @@ task_switch(selector_t *task_sel, task_s
 
 	case CPU_SYSDESC_TYPE_TSS_16:
 	case CPU_SYSDESC_TYPE_TSS_BUSY_16:
-		if (task_sel->desc.u.seg.limit < TSS_SIZE_16) {
+		if (task_sel->desc.u.seg.limit < TSS_16_LIMIT) {
 			EXCEPTION(TS_EXCEPTION, task_sel->idx);
 		}
 		task16 = 1;
@@ v1.23 line 334 / v1.26 line 333 @@ task_switch(selector_t *task_sel, task_s
 		/*FALLTHROUGH*/
 	case TASK_SWITCH_JMP:
 		/* clear busy flags in current task */
-		set_task_free(CPU_TR, &CPU_TR_DESC);
+		set_task_free(CPU_TR);
 		break;
 
 	case TASK_SWITCH_CALL:
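
The two switch statements in task_switch() encode the architectural busy-bit rules: a JMP or IRET task switch clears the outgoing task's busy bit, while a CALL (or interrupt/exception) leaves it set because the tasks become nested; conversely, the incoming task is marked busy for JMP and CALL, and for IRET it must already be busy. A standalone decision-table sketch (illustrative names, not the emulator's types) follows.

    /* Toy summary of the busy-bit bookkeeping implied by the switches above. */
    #include <stdbool.h>
    #include <stdio.h>

    enum task_switch_type { SWITCH_JMP, SWITCH_CALL, SWITCH_IRET };

    /* Does the outgoing task's TSS descriptor lose its busy bit? */
    static bool old_task_becomes_free(enum task_switch_type t)
    {
        return t == SWITCH_JMP || t == SWITCH_IRET;   /* CALL keeps it busy (nested task) */
    }

    /* Does the incoming task's TSS descriptor gain the busy bit? */
    static bool new_task_becomes_busy(enum task_switch_type t)
    {
        return t == SWITCH_JMP || t == SWITCH_CALL;   /* IRET returns to an already-busy task */
    }

    int main(void)
    {
        static const char *name[] = { "jmp", "call", "iret" };
        for (int t = SWITCH_JMP; t <= SWITCH_IRET; t++) {
            printf("%-4s: old free=%d, new busy=%d\n",
                   name[t], old_task_becomes_free(t), new_task_becomes_busy(t));
        }
        return 0;
    }
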
@@ v1.23 line 409 / v1.26 line 408 @@ task_switch(selector_t *task_sel, task_s
 		new_flags |= NT_FLAG;
 		/*FALLTHROUGH*/
 	case TASK_SWITCH_JMP:
-		set_task_busy(task_sel->selector, &task_sel->desc);
+		set_task_busy(task_sel->selector);
 		break;
 
 	case TASK_SWITCH_IRET:
 		/* check busy flag is active */
 		if (SEG_IS_VALID(&task_sel->desc)) {
@@ v1.23 line 431 / v1.26 line 430 @@ task_switch(selector_t *task_sel, task_s
 	/* load task selector to CPU_TR */
 	CPU_TR = task_sel->selector;
 	CPU_TR_DESC = task_sel->desc;
-	/* clear BUSY flag in descriptor cache */
-	CPU_TR_DESC.type &= ~CPU_SYSDESC_TYPE_TSS_BUSY_IND;
+	CPU_TR_DESC.type |= CPU_SYSDESC_TYPE_TSS_BUSY_IND;
 
 	/* set CR0 image CPU_CR0_TS */
 	CPU_CR0 |= CPU_CR0_TS;
@@ v1.23 line 461 / v1.26 line 458 @@ task_switch(selector_t *task_sel, task_s
 
 	/* I/O deny bitmap */
 	CPU_STAT_IOLIMIT = 0;
-	if (!task16 && iobase != 0 && iobase < task_sel->desc.u.seg.limit) {
-		CPU_STAT_IOLIMIT = (UINT16)(task_sel->desc.u.seg.limit - iobase);
+	if (!task16 && iobase != 0 && iobase < CPU_TR_DESC.u.seg.limit) {
+		CPU_STAT_IOLIMIT = (UINT16)(CPU_TR_DESC.u.seg.limit - iobase);
 		CPU_STAT_IOADDR = task_base + iobase;
 	}
 	VERBOSE(("task_switch: ioaddr = %08x, limit = %08x", CPU_STAT_IOADDR, CPU_STAT_IOLIMIT));