/*
 * x86 segmentation related helpers:
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"
/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
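
/*
 * Read the stack segment and stack pointer to use for a privilege
 * transition to 'dpl' out of the current task's TSS; the offsets used
 * below differ between the 16-bit and the 32-bit TSS formats.
 */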
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}
static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
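
/*
 * Hardware task switch: save the outgoing context into the current
 * TSS, then load registers, segments and the LDT from the TSS selected
 * by 'tss_selector'.  'source' (jmp, call or iret) decides how the
 * busy bit, the NT flag and the back-link field are updated.
 */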
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2), retaddr);
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 2),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 2),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set the busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}
int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
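
/*
 * SET_ESP only updates the bits of ESP selected by sp_mask, so a
 * 16-bit stack segment leaves the upper half of ESP untouched; the
 * 64-bit build special-cases the common masks to avoid truncation.
 */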
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                          \
    {                                                                \
        sp -= 2;                                                     \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                                      \
    {                                                                            \
        sp -= 4;                                                                 \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                           \
    {                                                                \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                     \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                                      \
    {                                                                           \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                                \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
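
/*
 * Protected-mode interrupt delivery: validate the IDT gate, optionally
 * switch to the inner-privilege stack taken from the TSS (or build a
 * vm86 return frame), push the old SS:ESP, EFLAGS, CS:EIP and error
 * code, then load CS:EIP from the gate.
 */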
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

    if (type == 5) {
        /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)
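
/*
 * In long mode the TSS holds the ring 0-2 RSP values and the IST
 * entries as 8-byte fields starting at offset 4; 'level' indexes them
 * directly (callers pass ist + 3 for IST stacks).
 */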
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    }
    esp &= ~0xfLL; /* align stack */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
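
/*
 * SYSRET derives its target selectors from MSR_STAR bits 63:48: a
 * 64-bit return uses that selector + 16 for CS, a 32-bit return uses
 * it directly, and SS is always selector + 8, all with RPL 3.
 */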
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
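
/*
 * Real-mode delivery uses the classic interrupt vector table: four
 * bytes per vector (offset then segment), and only FLAGS, CS and IP
 * are pushed on the stack.
 */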
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                      int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}
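
/*
 * Service at most one pending interrupt source per call and return
 * true if one was delivered; the case order below fixes the relative
 * priority of APIC poll, SIPI, SMI, NMI, MCE and external/virtual
 * interrupts.
 */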
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int intno;

    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
    if (!interrupt_request) {
        return false;
    }

    /* Don't process multiple interrupt requests in a single call.
     * This is required to make icount-driven execution deterministic.
     */
    switch (interrupt_request) {
#if !defined(CONFIG_USER_ONLY)
    case CPU_INTERRUPT_POLL:
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        break;
#endif
    case CPU_INTERRUPT_SIPI:
        do_cpu_sipi(cpu);
        break;
    case CPU_INTERRUPT_SMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
#ifdef CONFIG_USER_ONLY
        cpu_abort(CPU(cpu), "SMI interrupt: cannot enter SMM in user-mode");
#else
        do_smm_enter(cpu);
#endif /* CONFIG_USER_ONLY */
        break;
    case CPU_INTERRUPT_NMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
        env->hflags2 |= HF2_NMI_MASK;
        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
        break;
    case CPU_INTERRUPT_MCE:
        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
        break;
    case CPU_INTERRUPT_HARD:
        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                   CPU_INTERRUPT_VIRQ);
        intno = cpu_get_pic_interrupt(env);
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        break;
#if !defined(CONFIG_USER_ONLY)
    case CPU_INTERRUPT_VIRQ:
        /* FIXME: this should respect TPR */
        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
        intno = x86_ldl_phys(cs, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
        qemu_log_mask(CPU_LOG_TB_IN_ASM,
                      "Servicing virtual hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
        break;
#endif
    }

    /* Ensure that no TB jump will be modified as the program flow was changed. */
    return true;
}
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    target_ulong ptr;
    int index, entry_limit;
    uint32_t e1, e2;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
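}

/*
 * LTR accepts only an available TSS descriptor (type 1 or 9) in the
 * GDT and writes the descriptor back with the busy bit set, as real
 * hardware does.
 */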
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    target_ulong ptr;
    int index, type, entry_limit;
    uint32_t e1, e2;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }

#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                /* load the upper 8 bytes of the 64-bit call gate */
                if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
                if (type != 0) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                new_eip |= ((target_ulong)e1) << 32;
            }
#endif

            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                if (!(e2 & DESC_L_MASK)) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
                if (e2 & DESC_B_MASK) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
            }
#endif
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit &&
                (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
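
/*
 * Protected-mode far call: either a direct call through a code-segment
 * descriptor, a task switch through a TSS or task gate, or a call gate
 * which may switch to an inner-privilege stack and copy param_count
 * words from the old stack.
 */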
/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, offset, sp;

    LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif

        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        param_count = e2 & 0x1f;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* load the upper 8 bytes of the 64-bit call gate */
            if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
            if (type != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            offset |= ((target_ulong)e1) << 32;
        }
#endif
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (!(e2 & DESC_L_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (e2 & DESC_B_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            shift++;
        }
#endif
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
#ifdef TARGET_X86_64
            if (shift == 2) {
                sp = get_rsp_from_tss(env, dpl);
                ss = dpl;  /* SS = NULL selector with RPL = new CPL */
                new_stack = 1;
                sp_mask = 0;
                ssp = 0;  /* SS base is always zero in IA-32e mode */
                LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp, env->regs[R_ESP]);
            } else
#endif
            {
                uint32_t sp32;

                get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
                LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp32, param_count,
                          env->regs[R_ESP]);
                sp = sp32;
                if ((ss & 0xfffc) == 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if ((ss & 3) != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                if (ss_dpl != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_S_MASK) ||
                    (ss_e2 & DESC_CS_MASK) ||
                    !(ss_e2 & DESC_W_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_P_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }

                sp_mask = get_sp_mask(ss_e2);
                ssp = get_seg_base(ss_e1, ss_e2);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
#ifdef TARGET_X86_64
            if (shift == 2) {
                /* XXX: verify if new stack address is canonical */
                PUSHQ_RA(sp, env->segs[R_SS].selector, GETPC());
                PUSHQ_RA(sp, env->regs[R_ESP], GETPC());
                /* parameters aren't supported for 64-bit call gates */
            } else
#endif
            if (shift == 1) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

#ifdef TARGET_X86_64
        if (shift == 2) {
            PUSHQ_RA(sp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(sp, next_eip, GETPC());
        } else
#endif
        if (shift == 1) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
#ifdef TARGET_X86_64
            if (shift == 2) {
                cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
            } else
#endif
            {
                ss = (ss & ~3) | dpl;
                cpu_x86_load_seg_cache(env, R_SS, ss,
                                       ssp,
                                       get_seg_limit(ss_e1, ss_e2),
                                       ss_e2);
            }
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0,
                                   env->segs[seg_reg].base,
                                   env->segs[seg_reg].limit,
                                   env->segs[seg_reg].flags & ~DESC_P_MASK);
        }
    }
}
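
/*
 * Common tail for protected-mode lret and iret: pop CS:EIP (plus
 * EFLAGS for iret), revalidate CS, pop the outer SS:ESP when returning
 * to a less privileged level, and handle the return to vm86 mode.
 */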
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}
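
/*
 * SYSENTER/SYSEXIT build flat segments directly from
 * MSR_IA32_SYSENTER_CS: CS/SS for sysenter come from sysenter_cs and
 * sysenter_cs + 8, while sysexit returns through the +16/+24 (or
 * +32/+40 for a 64-bit exit) selectors with RPL 3.
 */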
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
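
/*
 * LSL, LAR, VERR and VERW never fault on a bad selector: they report
 * the outcome through ZF (via CC_SRC) and simply leave ZF clear when
 * the descriptor is not usable from the current privilege level.
 */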
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}