2 * librm: a library for interfacing to real-mode code
4 * Michael Brown <mbrown@fensystems.co.uk>
8 FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL )
10 /* Drag in general configuration */
11 #include <config/general.h>
13 /* Drag in local definitions */
16 /* CR0: protection enabled */
17 #define CR0_PE ( 1 << 0 )
/* CR0: paging enabled */
20 #define CR0_PG ( 1 << 31 )
22 /* CR4: physical address extensions */
23 #define CR4_PAE ( 1 << 5 )
25 /* Extended feature enable MSR (EFER) */
26 #define MSR_EFER 0xc0000080
28 /* EFER: long mode enable */
29 #define EFER_LME ( 1 << 8 )
34 /* Page: read/write */
37 /* Page: user/supervisor */
43 /* Size of various paging-related data structures */
/* A page table entry (PTE) is 8 bytes in PAE/long mode */
44 #define SIZEOF_PTE_LOG2 3
45 #define SIZEOF_PTE ( 1 << SIZEOF_PTE_LOG2 )
/* A page table is 4kB (512 x 8-byte entries) */
46 #define SIZEOF_PT_LOG2 12
47 #define SIZEOF_PT ( 1 << SIZEOF_PT_LOG2 )
48 #define SIZEOF_4KB_PAGE_LOG2 12
49 #define SIZEOF_4KB_PAGE ( 1 << SIZEOF_4KB_PAGE_LOG2 )
50 #define SIZEOF_2MB_PAGE_LOG2 21
51 #define SIZEOF_2MB_PAGE ( 1 << SIZEOF_2MB_PAGE_LOG2 )
/* NOTE(review): ( 1 << 32 ) would overflow a 32-bit int in C, but
 * this expression is used only in assembler expressions (see
 * PDE_LOW_PTES below), which gas evaluates with 64-bit arithmetic.
 */
52 #define SIZEOF_LOW_4GB_LOG2 32
53 #define SIZEOF_LOW_4GB ( 1 << SIZEOF_LOW_4GB_LOG2 )
55 /* Size of various C data structures */
/* Six 16-bit segment registers stored as 32-bit-padded pairs is not
 * the case here: 12 = 6 segment registers x 2 bytes each.
 */
56 #define SIZEOF_I386_SEG_REGS 12
/* Eight 32-bit general-purpose registers (as pushed by pushal): 8 x 4 */
57 #define SIZEOF_I386_REGS 32
58 #define SIZEOF_REAL_MODE_REGS ( SIZEOF_I386_SEG_REGS + SIZEOF_I386_REGS )
/* 32-bit EFLAGS */
59 #define SIZEOF_I386_FLAGS 4
60 #define SIZEOF_I386_ALL_REGS ( SIZEOF_REAL_MODE_REGS + SIZEOF_I386_FLAGS )
/* Size of the long-mode register dump used by long_save_regs /
 * long_restore_regs (presumably 16 x 8-byte registers — see those
 * routines below).
 */
61 #define SIZEOF_X86_64_REGS 128
63 /* Size of an address */
70 /* Default code size */
72 #define CODE_DEFAULT code64
74 #define CODE_DEFAULT code32
77 /* Selectively assemble code for 32-bit/64-bit builds */
86 /****************************************************************************
87 * Global descriptor table
89 * Call init_librm to set up the GDT before attempting to use any
90 * protected-mode code.
92 * NOTE: This must be located before prot_to_real, otherwise gas
93 * throws a "can't handle non absolute segment in `ljmp'" error due to
94 * not knowing the value of REAL_CS when the ljmp is encountered.
96 * Note also that putting ".word gdt_end - gdt - 1" directly into
97 * gdt_limit, rather than going via gdt_length, will also produce the
98 * "non absolute segment" error. This is most probably a bug in gas.
99 ****************************************************************************
101 .section ".data16.gdt", "aw", @progbits
/* The GDTR (limit + base) is kept in the mandatory null-descriptor
 * slot, since the CPU never loads that slot as a segment descriptor.
 */
104 gdtr: /* The first GDT entry is unused, the GDTR can fit here. */
105 gdt_limit: .word gdt_length - 1
107 .word 0 /* padding */
109 .org gdt + VIRTUAL_CS, 0
110 virtual_cs: /* 32 bit protected mode code segment, virtual addresses */
/* access 0x9f: present, DPL 0, code; flags 0xcf: 4kB granularity, 32-bit */
112 .byte 0, 0x9f, 0xcf, 0
114 .org gdt + VIRTUAL_DS, 0
115 virtual_ds: /* 32 bit protected mode data segment, virtual addresses */
/* access 0x93: present, DPL 0, writable data; flags 0xcf: 4kB gran, 32-bit */
117 .byte 0, 0x93, 0xcf, 0
119 .org gdt + PHYSICAL_CS, 0
120 physical_cs: /* 32 bit protected mode code segment, physical addresses */
122 .byte 0, 0x9f, 0xcf, 0
124 .org gdt + PHYSICAL_DS, 0
125 physical_ds: /* 32 bit protected mode data segment, physical addresses */
127 .byte 0, 0x93, 0xcf, 0
129 .org gdt + REAL_CS, 0
130 real_cs: /* 16 bit real mode code segment */
/* access 0x9b: present, DPL 0, code; flags 0x00: byte granularity, 16-bit.
 * Base is patched at run time by init_librm (via set_seg_base).
 */
132 .byte 0, 0x9b, 0x00, 0
134 .org gdt + REAL_DS, 0
135 real_ds: /* 16 bit real mode data segment */
137 .byte 0, 0x93, 0x00, 0
/* Base address is the real-mode segment P2R_DS converted to a physical
 * byte address (segment << 4); limit is 64kB - 1.
 */
140 p2r_ds: /* 16 bit real mode data segment for prot_to_real transition */
141 .word 0xffff, ( P2R_DS << 4 )
142 .byte 0, 0x93, 0x00, 0
144 .org gdt + LONG_CS, 0
145 long_cs: /* 64 bit long mode code segment */
/* access 0x9a: present, DPL 0, code; flags 0x20: L bit set (64-bit mode) */
147 .byte 0, 0x9a, 0x20, 0
150 .equ gdt_length, gdt_end - gdt
152 /****************************************************************************
153 * Stored real-mode and protected-mode stack pointers
155 * The real-mode stack pointer is stored here whenever real_to_prot
156 * is called and restored whenever prot_to_real is called. The
157 * converse happens for the protected-mode stack pointer.
159 * Despite initial appearances this scheme is, in fact, re-entrant,
160 * because program flow dictates that we always return via the point
161 * we left by. For example:
165 * Print a text string
175 * At point 1, the RM mode stack value, say RPXE, is stored in
176 * rm_ss,sp. We want this value to still be present in rm_ss,sp when
179 * At point 2, the RM stack value is restored from RPXE. At point 3,
180 * the RM stack value is again stored in rm_ss,sp. This *does*
181 * overwrite the RPXE that we have stored there, but it's the same
182 * value, since the code between points 2 and 3 has managed to return
184 ****************************************************************************
186 .section ".bss.rm_ss_sp", "aw", @nobits
192 .section ".data.pm_esp", "aw", @progbits
193 pm_esp: .long VIRTUAL(_estack)
195 /****************************************************************************
196 * Temporary static data buffer
198 * This is used to reduce the amount of real-mode stack space consumed
199 * during mode transitions, since we are sometimes called with very
200 * little real-mode stack space available.
201 ****************************************************************************
203 /* Temporary static buffer usage by virt_call */
207 VC_TMP_PAD: .space 4 /* for alignment */
211 VC_TMP_EMER: .space 8
213 #ifdef TIVOLI_VMM_WORKAROUND
214 VC_TMP_FXSAVE: .space 512
219 /* Temporary static buffer usage by real_call */
221 RC_TMP_FUNCTION: .space 4
225 /* Shared temporary static buffer */
226 .section ".bss16.rm_tmpbuf", "aw", @nobits
230 .size rm_tmpbuf, . - rm_tmpbuf
232 /****************************************************************************
233 * Virtual address offsets
235 * These are used by the protected-mode code to map between virtual
236 * and physical addresses, and to access variables in the .text16 or
238 ****************************************************************************
241 VA_VIRT_OFFSET: .space SIZEOF_ADDR
242 VA_TEXT16: .space SIZEOF_ADDR
243 VA_DATA16: .space SIZEOF_ADDR
247 /* Internal copies, used only by librm itself */
248 .section ".bss16.rm_virt_addrs", "aw", @nobits
/* Reserve one copy of the virtual-address block (layout given by the
 * VA_* offsets above) and define a named alias for each field.
 */
249 rm_virt_addrs: .space VA_SIZE
250 .equ rm_virt_offset, ( rm_virt_addrs + VA_VIRT_OFFSET )
251 .equ rm_text16, ( rm_virt_addrs + VA_TEXT16 )
252 .equ rm_data16, ( rm_virt_addrs + VA_DATA16 )
254 /* Externally visible variables, used by C code */
255 .section ".bss.virt_addrs", "aw", @nobits
/* Same layout as rm_virt_addrs above; init_librm copies rm_virt_addrs
 * into this block (VA_SIZE/4 dwords) once protected mode is available.
 */
256 virt_addrs: .space VA_SIZE
258 .equ virt_offset, ( virt_addrs + VA_VIRT_OFFSET )
260 .equ text16, ( virt_addrs + VA_TEXT16 )
262 .equ data16, ( virt_addrs + VA_DATA16 )
264 /****************************************************************************
265 * init_librm (real-mode far call, 16-bit real-mode far return address)
267 * Initialise the GDT ready for transitions to protected mode.
270 * %cs : .text16 segment
271 * %ds : .data16 segment
272 * %edi : Physical base of protected-mode code
273 ****************************************************************************
275 .section ".text16.init_librm", "ax", @progbits
279 /* Preserve registers */
284 /* Store rm_virt_offset and set up virtual_cs and virtual_ds segments */
285 subl $VIRTUAL(_textdata), %edi
286 movl %edi, rm_virt_offset
287 .if64 ; setae (rm_virt_offset+4) ; .endif
289 movw $virtual_cs, %bx
291 movw $virtual_ds, %bx
294 /* Store rm_cs and rm_text16, set up real_cs segment */
301 .if32 ; subl %edi, %eax ; .endif
304 /* Store rm_ds and rm_data16, set up real_ds segment and GDT base */
313 .if32 ; subl %edi, %eax ; .endif
316 /* Configure virt_call for protected mode, if applicable */
317 .if64 ; movl $VIRTUAL(vc_pmode), %cs:vc_jmp_offset ; .endif
319 /* Switch to protected mode */
320 virtcall init_librm_pmode
321 .section ".text.init_librm", "ax", @progbits
325 /* Store virt_offset, text16, and data16 */
329 movl $rm_virt_addrs, %esi
330 movl $VIRTUAL(virt_addrs), %edi
331 movl $( VA_SIZE / 4 ), %ecx
335 .if64 ; /* Initialise long mode, if applicable */
336 movl VIRTUAL(virt_offset), %edi
337 leal VIRTUAL(p2l_ljmp_target)(%edi), %eax
338 movl %eax, VIRTUAL(p2l_ljmp_offset)
341 /* Return to real mode */
343 .section ".text16.init_librm", "ax", @progbits
347 /* Configure virt_call for long mode, if applicable */
348 .if64 ; movl $VIRTUAL(vc_lmode), %cs:vc_jmp_offset ; .endif
353 /* Restore registers */
359 .section ".text16.set_seg_base", "ax", @progbits
369 /****************************************************************************
370 * real_to_prot (real-mode near call, 32-bit virtual return address)
372 * Switch from 16-bit real-mode to 32-bit protected mode with virtual
373 * addresses. The real-mode %ss:sp is stored in rm_ss and rm_sp, and
374 * the protected-mode %esp is restored from the saved pm_esp.
375 * Interrupts are disabled. All other registers may be destroyed.
377 * The return address for this function should be a 32-bit virtual
381 * %ecx : number of bytes to move from RM stack to PM stack
382 * %edx : number of bytes to copy from RM temporary buffer to PM stack
384 ****************************************************************************
386 .section ".text16.real_to_prot", "ax", @progbits
389 /* Enable A20 line */
391 /* A failure at this point is fatal, and there's nothing we
392 * can do about it other than lock the machine to make the
393 * problem immediately visible.
397 /* Make sure we have our data segment available */
400 /* Add protected-mode return address to length of data to be copied */
401 addw $4, %cx /* %ecx must be less than 64kB anyway */
403 /* Real-mode %ss:%sp => %ebp and virtual address => %esi */
408 addr32 leal (%eax,%ebp), %esi
409 subl rm_virt_offset, %esi
413 /* Real-mode data segment virtual address => %ebx */
415 .if64 ; subl rm_virt_offset, %ebx ; .endif
417 /* Load protected-mode global descriptor table */
420 /* Zero segment registers. This wastes around 12 cycles on
421 * real hardware, but saves a substantial number of emulated
422 * instructions under KVM.
431 /* Switch to protected mode (with paging disabled if applicable) */
434 .if64 ; andl $~CR0_PG, %eax ; .endif
437 data32 ljmp $VIRTUAL_CS, $VIRTUAL(r2p_pmode)
438 .section ".text.real_to_prot", "ax", @progbits
441 /* Set up protected-mode data segments and stack pointer */
442 movw $VIRTUAL_DS, %ax
448 movl VIRTUAL(pm_esp), %esp
450 /* Load protected-mode interrupt descriptor table */
453 /* Record real-mode %ss:sp (after removal of data) */
455 movl %ebp, VIRTUAL(rm_sp)
457 /* Move data from RM stack to PM stack */
463 /* Copy data from RM temporary buffer to PM stack */
464 leal rm_tmpbuf(%ebx), %esi
468 /* Return to virtual address */
471 /****************************************************************************
472 * prot_to_real (protected-mode near call, 32-bit real-mode return address)
474 * Switch from 32-bit protected mode with virtual addresses to 16-bit
475 * real mode. The protected-mode %esp is stored in pm_esp and the
476 * real-mode %ss:sp is restored from the saved rm_ss and rm_sp. The
477 * high word of the real-mode %esp is set to zero. All real-mode data
478 * segment registers are loaded from the saved rm_ds. Interrupts are
479 * *not* enabled, since we want to be able to use prot_to_real in an
480 * ISR. All other registers may be destroyed.
482 * The return address for this function should be a 32-bit (sic)
483 * real-mode offset within .code16.
486 * %ecx : number of bytes to move from PM stack to RM stack
487 * %edx : number of bytes to move from PM stack to RM temporary buffer
488 * %esi : real-mode global and interrupt descriptor table registers
490 ****************************************************************************
492 .section ".text.prot_to_real", "ax", @progbits
495 /* Copy real-mode global descriptor table register to RM code segment */
496 movl VIRTUAL(text16), %edi
497 .if64 ; subl VIRTUAL(virt_offset), %edi ; .endif
498 leal rm_gdtr(%edi), %edi
502 /* Load real-mode interrupt descriptor table register */
505 /* Add return address to data to be moved to RM stack */
508 /* Real-mode %ss:sp => %ebp and virtual address => %edi */
509 movl VIRTUAL(rm_sp), %ebp
511 movzwl VIRTUAL(rm_ss), %eax
515 subl VIRTUAL(virt_offset), %edi
517 /* Move data from PM stack to RM stack */
521 /* Move data from PM stack to RM temporary buffer */
522 movl VIRTUAL(data16), %edi
523 .if64 ; subl VIRTUAL(virt_offset), %edi ; .endif
524 addl $rm_tmpbuf, %edi
528 /* Record protected-mode %esp (after removal of data) */
529 movl %esi, VIRTUAL(pm_esp)
531 /* Load real-mode segment limits */
538 ljmp $REAL_CS, $p2r_rmode
539 .section ".text16.prot_to_real", "ax", @progbits
542 /* Load real-mode GDT */
543 data32 lgdt %cs:rm_gdtr
544 /* Switch to real mode */
551 /* Set up real-mode data segments and stack pointer */
562 /* Return to real-mode address */
566 /* Real-mode code and data segments. Assigned by the call to
567 * init_librm. rm_cs doubles as the segment part of the jump
568 * instruction used by prot_to_real. Both are located in
569 * .text16 rather than .data16: rm_cs since it forms part of
570 * the jump instruction within the code segment, and rm_ds
571 * since real-mode code needs to be able to locate the data
572 * segment with no other reference available.
575 .equ rm_cs, ( p2r_ljmp_rm_cs + 3 )
577 .section ".text16.data.rm_ds", "aw", @progbits
581 /* Real-mode global and interrupt descriptor table registers */
582 .section ".text16.data.rm_gdtr", "aw", @progbits
587 /****************************************************************************
588 * phys_to_prot (protected-mode near call, 32-bit physical return address)
590 * Switch from 32-bit protected mode with physical addresses to 32-bit
591 * protected mode with virtual addresses. %esp is adjusted to a
592 * virtual address. All other registers are preserved.
594 * The return address for this function should be a 32-bit physical
597 ****************************************************************************
599 .section ".text.phys_to_prot", "ax", @progbits
603 /* Preserve registers */
607 /* Switch to virtual code segment */
609 ljmp $VIRTUAL_CS, $VIRTUAL(1f)
611 /* Switch to virtual data segment and adjust %esp */
612 movw $VIRTUAL_DS, %ax
618 movl VIRTUAL(virt_offset), %ebp
621 /* Adjust return address to a virtual address */
624 /* Restore registers and return */
629 .if32 /* Expose as _phys_to_virt for use by COMBOOT, if applicable */
631 .equ _phys_to_virt, phys_to_prot
634 /****************************************************************************
635 * prot_to_phys (protected-mode near call, 32-bit virtual return address)
637 * Switch from 32-bit protected mode with virtual addresses to 32-bit
638 * protected mode with physical addresses. %esp is adjusted to a
639 * physical address. All other registers are preserved.
641 * The return address for this function should be a 32-bit virtual
644 ****************************************************************************
646 .section ".text.prot_to_phys", "ax", @progbits
649 /* Preserve registers */
653 /* Adjust return address to a physical address */
654 movl VIRTUAL(virt_offset), %ebp
657 /* Switch to physical code segment */
660 leal VIRTUAL(1f)(%ebp), %eax
664 /* Switch to physical data segment and adjust %esp */
665 movw $PHYSICAL_DS, %ax
673 /* Restore registers and return */
678 .if32 /* Expose as _virt_to_phys for use by COMBOOT, if applicable */
680 .equ _virt_to_phys, prot_to_phys
683 /****************************************************************************
684 * intr_to_prot (protected-mode near call, 32-bit virtual return address)
686 * Switch from 32-bit protected mode with a virtual code segment and
687 * either a physical or virtual stack segment to 32-bit protected mode
688 * with normal virtual addresses. %esp is adjusted if necessary to a
689 * virtual address. All other registers are preserved.
691 * The return address for this function should be a 32-bit virtual
694 ****************************************************************************
696 .section ".text.intr_to_prot", "ax", @progbits
700 /* Preserve registers */
703 /* Check whether stack segment is physical or virtual */
705 cmpw $VIRTUAL_DS, %ax
706 movw $VIRTUAL_DS, %ax
708 /* Reload data segment registers */
714 /* Reload stack segment and adjust %esp if necessary */
717 subl VIRTUAL(virt_offset), %esp
719 /* Restore registers and return */
723 /* Expose as _intr_to_virt for use by GDB */
725 .equ _intr_to_virt, intr_to_prot
727 /****************************************************************************
728 * prot_to_long (protected-mode near call, 32-bit virtual return address)
730 * Switch from 32-bit protected mode with virtual addresses to 64-bit
731 * long mode. The protected-mode %esp is adjusted to a physical
732 * address. All other registers are preserved.
734 * The return address for this function should be a 32-bit (sic)
737 ****************************************************************************
741 .section ".text.prot_to_long", "ax", @progbits
744 /* Preserve registers */
750 movl VIRTUAL(pml4), %eax
758 /* Enable long mode */
769 /* Restore registers */
774 /* Construct 64-bit return address */
776 movl $0xffffffff, 4(%esp)
778 /* Switch to long mode (using a physical %rip) */
782 /* Adjust and zero-extend %esp to a physical address */
783 addl virt_offset, %esp
785 /* Use long-mode IDT */
788 /* Return to virtual address */
791 /* Long mode jump offset and target. Required since an ljmp
792 * in protected mode will zero-extend the offset, and so
793 * cannot reach an address within the negative 2GB as used by
794 * -mcmodel=kernel. Assigned by the call to init_librm.
796 .equ p2l_ljmp_offset, ( p2l_ljmp + 1 )
797 .equ p2l_ljmp_target, p2l_lmode
801 /****************************************************************************
802 * long_to_prot (long-mode near call, 64-bit virtual return address)
804 * Switch from 64-bit long mode to 32-bit protected mode with virtual
805 * addresses. The long-mode %rsp is adjusted to a virtual address.
806 * All other registers are preserved.
808 * The return address for this function should be a 64-bit (sic)
811 ****************************************************************************
815 .section ".text.long_to_prot", "ax", @progbits
818 /* Switch to protected mode */
822 /* Adjust %esp to a virtual address */
823 subl VIRTUAL(virt_offset), %esp
825 /* Preserve registers */
835 /* Disable PAE (in case external non-PAE-aware code enables paging) */
840 /* Disable long mode */
846 /* Restore registers */
851 /* Use protected-mode IDT */
857 /* Long mode jump vector. Required since there is no "ljmp
858 * immediate" instruction in long mode.
860 .section ".data.l2p_vector", "aw", @progbits
862 .long VIRTUAL(l2p_pmode), VIRTUAL_CS
866 /****************************************************************************
867 * long_save_regs (long-mode near call, 64-bit virtual return address)
869 * Preserve registers that are accessible only in long mode. This
870 * includes %r8-%r15 and the upper halves of %rax, %rbx, %rcx, %rdx,
871 * %rsi, %rdi, and %rbp.
873 ****************************************************************************
877 .section ".text.long_preserve_regs", "ax", @progbits
880 /* Preserve registers */
899 jmp *SIZEOF_X86_64_REGS(%rsp)
903 /****************************************************************************
904 * long_restore_regs (long-mode near call, 64-bit virtual return address)
906 * Restore registers that are accessible only in long mode. This
907 * includes %r8-%r15 and the upper halves of %rax, %rbx, %rcx, %rdx,
908 * %rsi, %rdi, and %rbp.
910 ****************************************************************************
914 .section ".text.long_restore_regs", "ax", @progbits
917 /* Move return address above register dump */
918 popq SIZEOF_X86_64_REGS(%rsp)
920 /* Restore registers */
935 leaq 8(%rsp), %rsp /* discard */
950 /****************************************************************************
951 * virt_call (real-mode near call, 16-bit real-mode near return address)
953 * Call a specific C function in 32-bit protected mode or 64-bit long
954 * mode (as applicable). The prototype of the C function must be
955 * void function ( struct i386_all_regs *ix86 );
956 * ix86 will point to a struct containing the real-mode registers
957 * at entry to virt_call().
959 * All registers will be preserved across virt_call(), unless the C
960 * function explicitly overwrites values in ix86. Interrupt status
961 * and GDT will also be preserved. Gate A20 will be enabled.
963 * Note that virt_call() does not rely on the real-mode stack
964 * remaining intact in order to return, since everything relevant is
965 * copied to the protected-mode stack for the duration of the call.
966 * In particular, this means that a real-mode prefix can make a call
967 * to main() which will return correctly even if the prefix's stack
968 * gets vapourised during the Etherboot run. (The prefix cannot rely
969 * on anything else on the stack being preserved, so should move any
970 * critical data to registers before calling main()).
973 * function : 32-bit virtual address of function to call
976 * pushl $pxe_api_call
978 * to call in to the C function
979 * void pxe_api_call ( struct i386_all_regs *ix86 );
980 ****************************************************************************
983 VC_OFFSET_IX86: .space SIZEOF_I386_ALL_REGS
984 VC_OFFSET_PADDING: .space 2 /* for alignment */
985 VC_OFFSET_RETADDR: .space 2
987 VC_OFFSET_FUNCTION: .space 4
991 .section ".text16.virt_call", "ax", @progbits
995 /* Preserve registers and flags on external RM stack */
996 pushw %ss /* padding */
1006 /* Claim ownership of temporary static buffer */
1010 #ifdef TIVOLI_VMM_WORKAROUND
1011 /* Preserve FPU, MMX and SSE state in temporary static buffer */
1012 fxsave ( rm_tmpbuf + VC_TMP_FXSAVE )
1014 /* Preserve GDT and IDT in temporary static buffer */
1015 sidt ( rm_tmpbuf + VC_TMP_IDT )
1016 sgdt ( rm_tmpbuf + VC_TMP_GDT )
1018 .if64 ; /* Preserve control registers, if applicable */
1019 movl $MSR_EFER, %ecx
1021 movl %eax, ( rm_tmpbuf + VC_TMP_EMER + 0 )
1022 movl %edx, ( rm_tmpbuf + VC_TMP_EMER + 4 )
1024 movl %eax, ( rm_tmpbuf + VC_TMP_CR4 )
1026 movl %eax, ( rm_tmpbuf + VC_TMP_CR3 )
1028 /* For sanity's sake, clear the direction flag as soon as possible */
1031 /* Switch to protected mode and move register dump to PM stack */
1032 movl $VC_OFFSET_END, %ecx
1033 movl $VC_TMP_END, %edx
1034 pushl $VIRTUAL(vc_pmode)
1035 vc_jmp: jmp real_to_prot
1036 .section ".text.virt_call", "ax", @progbits
1039 /* Call function (in protected mode) */
1041 call *(VC_OFFSET_FUNCTION+4)(%esp)
1042 popl %eax /* discard */
1044 .if64 ; /* Switch to long mode */
1050 /* Call function (in long mode) */
1052 movslq VC_OFFSET_FUNCTION(%rsp), %rax
1055 /* Switch to protected mode */
1059 /* Switch to real mode and move register dump back to RM stack */
1060 movl $VC_OFFSET_END, %ecx
1061 movl $VC_TMP_END, %edx
1062 leal VC_TMP_GDT(%esp, %ecx), %esi
1065 .section ".text16.virt_call", "ax", @progbits
1068 .if64 ; /* Restore control registers, if applicable */
1070 movl ( rm_tmpbuf + VC_TMP_CR3 ), %eax
1072 movl ( rm_tmpbuf + VC_TMP_CR4 ), %eax
1074 movl ( rm_tmpbuf + VC_TMP_EMER + 0 ), %eax
1075 movl ( rm_tmpbuf + VC_TMP_EMER + 4 ), %edx
1076 movl $MSR_EFER, %ecx
1080 #ifdef TIVOLI_VMM_WORKAROUND
1081 /* Restore FPU, MMX and SSE state from temporary static buffer */
1082 fxrstor ( rm_tmpbuf + VC_TMP_FXSAVE )
1084 /* Restore registers and flags and return */
1085 popl %eax /* skip %cs and %ss */
1091 /* popal skips %esp. We therefore want to do "movl -20(%sp),
1092 * %esp", but -20(%sp) is not a valid 80386 expression.
1093 * Fortunately, prot_to_real() zeroes the high word of %esp, so
1094 * we can just use -20(%esp) instead.
1096 addr32 movl -20(%esp), %esp
1098 popw %ss /* padding */
1100 /* Return and discard function parameters */
1101 ret $( VC_OFFSET_END - VC_OFFSET_PARAMS )
1104 /* Protected-mode jump target */
1105 .equ vc_jmp_offset, ( vc_jmp - 4 )
1107 /****************************************************************************
1108 * real_call (protected-mode near call, 32-bit virtual return address)
1109 * real_call (long-mode near call, 64-bit virtual return address)
1111 * Call a real-mode function from protected-mode or long-mode code.
1113 * The non-segment register values will be passed directly to the
1114 * real-mode code. The segment registers will be set as per
1115 * prot_to_real. The non-segment register values set by the real-mode
1116 * function will be passed back to the protected-mode or long-mode
1117 * caller. A result of this is that this routine cannot be called
1118 * directly from C code, since it clobbers registers that the C ABI
1119 * expects the callee to preserve.
1121 * librm.h defines a convenient macro REAL_CODE() for using real_call.
1122 * See librm.h and realmode.h for details and examples.
1125 * function : offset within .text16 of real-mode function to call
1128 ****************************************************************************
1131 RC_OFFSET_REGS: .space SIZEOF_I386_REGS
1133 RC_OFFSET_FUNCTION_COPY:.space 4
1135 RC_OFFSET_LREGS: .space SIZEOF_X86_64_REGS
1136 RC_OFFSET_LREG_RETADDR: .space SIZEOF_ADDR
1138 RC_OFFSET_RETADDR: .space SIZEOF_ADDR
1140 RC_OFFSET_FUNCTION: .space SIZEOF_ADDR
1144 .section ".text.real_call", "ax", @progbits
1148 .if64 ; /* Preserve registers and switch to protected mode, if applicable */
1149 call long_preserve_regs
1153 /* Create register dump and function pointer copy on PM stack */
1154 pushl ( RC_OFFSET_FUNCTION - RC_OFFSET_FUNCTION_COPY - 4 )(%esp)
1157 /* Switch to real mode and move register dump to RM stack */
1158 movl $RC_OFFSET_REGS_END, %ecx
1159 movl $RC_TMP_END, %edx
1161 movl $VIRTUAL(rm_default_gdtr_idtr), %esi
1163 .section ".text16.real_call", "ax", @progbits
1166 /* Call real-mode function */
1168 call *( rm_tmpbuf + RC_TMP_FUNCTION )
1171 /* For sanity's sake, clear the direction flag as soon as possible */
1174 /* Switch to protected mode and move register dump back to PM stack */
1175 movl $RC_OFFSET_REGS_END, %ecx
1177 pushl $VIRTUAL(rc_pmode)
1179 .section ".text.real_call", "ax", @progbits
1182 /* Restore registers */
1185 .if64 ; /* Switch to long mode and restore registers, if applicable */
1188 call long_restore_regs
1190 /* Return and discard function parameters */
1191 ret $( RC_OFFSET_END - RC_OFFSET_PARAMS )
1194 /* Default real-mode global and interrupt descriptor table registers */
1195 .section ".data.rm_default_gdtr_idtr", "aw", @progbits
1196 rm_default_gdtr_idtr:
1197 .word 0 /* Global descriptor table limit */
1198 .long 0 /* Global descriptor table base */
/* 0x03ff = limit of the real-mode interrupt vector table at physical
 * address zero: 256 vectors x 4 bytes - 1.
 */
1199 .word 0x03ff /* Interrupt descriptor table limit */
1200 .long 0 /* Interrupt descriptor table base */
1202 /****************************************************************************
1203 * phys_call (protected-mode near call, 32-bit virtual return address)
1204 * phys_call (long-mode near call, 64-bit virtual return address)
1206 * Call a function with flat 32-bit physical addressing
1208 * The non-segment register values will be passed directly to the
1209 * function. The segment registers will be set for flat 32-bit
1210 * physical addressing. The non-segment register values set by the
1211 * function will be passed back to the caller.
1213 * librm.h defines a convenient macro PHYS_CODE() for using phys_call.
1216 * function : virtual (sic) address of function to call
1218 ****************************************************************************
1222 PHC_OFFSET_LREGS: .space SIZEOF_X86_64_REGS
1223 PHC_OFFSET_LREG_RETADDR:.space SIZEOF_ADDR
1225 PHC_OFFSET_RETADDR: .space SIZEOF_ADDR
1227 PHC_OFFSET_FUNCTION: .space SIZEOF_ADDR
1231 .section ".text.phys_call", "ax", @progbits
1235 .if64 ; /* Preserve registers and switch to protected mode, if applicable */
1236 call long_preserve_regs
1240 /* Adjust function pointer to a physical address */
1242 movl VIRTUAL(virt_offset), %ebp
1243 addl %ebp, ( PHC_OFFSET_FUNCTION + 4 /* saved %ebp */ )(%esp)
1246 /* Switch to physical addresses */
1250 call *PHC_OFFSET_FUNCTION(%esp)
1252 /* For sanity's sake, clear the direction flag as soon as possible */
1255 /* Switch to virtual addresses */
1258 .if64 ; /* Switch to long mode and restore registers, if applicable */
1261 call long_restore_regs
1263 /* Return and discard function parameters */
1264 ret $( PHC_OFFSET_END - PHC_OFFSET_PARAMS )
1266 /****************************************************************************
1267 * phys_to_long (protected-mode near call, 32-bit physical return address)
1271 ****************************************************************************
1275 .section ".text.phys_to_long", "ax", @progbits
1279 /* Switch to virtual addresses */
1282 /* Convert to 32-bit virtual return address */
1284 movl VIRTUAL(virt_offset), %eax
1288 /* Switch to long mode and return */
1291 /* Expose as _phys_to_virt for use by COMBOOT */
1292 .globl _phys_to_virt
1293 .equ _phys_to_virt, phys_to_long
1297 /****************************************************************************
1298 * long_to_phys (long-mode near call, 64-bit virtual return address)
1302 ****************************************************************************
1306 .section ".text.long_to_phys", "ax", @progbits
1310 /* Switch to protected mode */
1314 /* Convert to 32-bit virtual return address */
1317 /* Switch to physical addresses and return */
1320 /* Expose as _virt_to_phys for use by COMBOOT */
1321 .globl _virt_to_phys
1322 .equ _virt_to_phys, long_to_phys
1326 /****************************************************************************
1327 * flatten_real_mode (real-mode near call)
1329 * Switch to flat real mode
1331 ****************************************************************************
1333 .section ".text16.flatten_real_mode", "ax", @progbits
1335 .globl flatten_real_mode
1337 /* Modify GDT to use flat real mode */
1338 movb $0x8f, real_cs + 6
1339 movb $0x8f, real_ds + 6
1340 /* Call dummy protected-mode function */
1341 virtcall flatten_dummy
1343 movb $0x00, real_cs + 6
1344 movb $0x00, real_ds + 6
1348 .section ".text.flatten_dummy", "ax", @progbits
1353 /****************************************************************************
1356 * Used by the protected-mode and long-mode interrupt vectors to call
1357 * the interrupt() function.
1359 * May be entered with either physical or virtual stack segment.
1360 ****************************************************************************
1362 .section ".text.interrupt_wrapper", "ax", @progbits
1364 .globl interrupt_wrapper
1366 /* Preserve registers (excluding already-saved %eax and
1367 * otherwise unused registers which are callee-save for both
1368 * 32-bit and 64-bit ABIs).
1376 /* Expand IRQ number to whole %eax register */
1379 .if64 ; /* Skip transition to long mode, if applicable */
1384 /* Preserve segment registers and original %esp */
1392 /* Switch to virtual addressing */
1395 /* Switch to long mode */
1399 1: /* Preserve long-mode caller-save registers */
1405 /* Expand IRQ number to whole %rdi register */
1408 /* Call interrupt handler */
1411 /* Restore long-mode caller-save registers */
1417 /* Skip transition back to protected mode, if applicable */
1421 /* Switch to protected mode */
1426 /* Restore segment registers and original %esp */
1434 1: /* Restore registers */
1442 /* Return from interrupt (with REX prefix if required) */
1443 .if64 ; jne 1f ; .byte 0x48 ; .endif
/****************************************************************************
 * Page tables
 *
 ****************************************************************************
 */
	.section ".pages", "aw", @nobits

	/* Page map level 4 entries (PML4Es)
	 *
	 * This comprises:
	 *
	 * - PML4E[0x000] covering [0x0000000000000000-0x0000007fffffffff]
	 * - PML4E[0x1ff] covering [0xffffff8000000000-0xffffffffffffffff]
	 *
	 *   These point to the PDPT.  This creates some aliased
	 *   addresses within unused portions of the 64-bit address
	 *   space, but allows us to use just a single PDPT.
	 *
	 * - PDE[...] covering arbitrary 2MB portions of I/O space
	 *
	 *   These are 2MB pages created by ioremap() to cover I/O
	 *   device addresses.
	 *
	 * NOTE(review): the pml4e label and its storage reservation
	 * are not visible in this excerpt.
	 */
	.size pml4e, . - pml4e

	/* The PML4 doubles as the page holding ioremap()'s 2MB I/O PDEs */
	.equ io_pages, pml4e

	/* Page directory pointer table entries (PDPTEs)
	 *
	 * This comprises:
	 *
	 * - PDPTE[0x000] covering [0x0000000000000000-0x000000003fffffff]
	 * - PDPTE[0x001] covering [0x0000000040000000-0x000000007fffffff]
	 * - PDPTE[0x002] covering [0x0000000080000000-0x00000000bfffffff]
	 * - PDPTE[0x003] covering [0x00000000c0000000-0x00000000ffffffff]
	 *
	 *   These point to the appropriate page directories (in
	 *   pde_low) used to identity-map the whole of the 32-bit
	 *   address space.
	 *
	 * - PDPTE[0x004] covering [0x0000000100000000-0x000000013fffffff]
	 *
	 *   This points back to the PML4, allowing the PML4 to be
	 *   (ab)used to hold 2MB pages used for I/O device addresses.
	 *
	 * - PDPTE[0x1ff] covering [0xffffffffc0000000-0xffffffffffffffff]
	 *
	 *   This points back to the PDPT itself, allowing the PDPT to
	 *   be (ab)used to hold PDEs covering .textdata.
	 *
	 * - PDE[N-M] covering [_textdata,_end)
	 *
	 *   These are used to point to the page tables (in
	 *   pte_textdata) used to map our .textdata section.  Note
	 *   that each PDE covers 2MB, so we are likely to use only a
	 *   single PDE in practice.
	 *
	 * NOTE(review): the pdpte label and its storage reservation
	 * are not visible in this excerpt.
	 */
	.size pdpte, . - pdpte

	/* Tail of the PDPT is (ab)used to hold the PDEs for .textdata */
	.equ pde_textdata, pdpte /* (ab)use */

	/* Page directory entries (PDEs) for the low 4GB
	 *
	 * This comprises 2048 2MB pages to identity-map the whole of
	 * the 32-bit address space.
	 */
	.equ PDE_LOW_PTES, ( SIZEOF_LOW_4GB / SIZEOF_2MB_PAGE )
	.equ PDE_LOW_PTS, ( ( PDE_LOW_PTES * SIZEOF_PTE ) / SIZEOF_PT )
	.space ( PDE_LOW_PTS * SIZEOF_PT )
	.size pde_low, . - pde_low

	/* Page table entries (PTEs) for .textdata
	 *
	 * This comprises enough 4kB pages to map the whole of
	 * .textdata.  The required number of PTEs is calculated by
	 * the linker script.
	 *
	 * Note that these mappings do not cover the PTEs themselves.
	 * This does not matter, since code running with paging
	 * enabled never needs to access these PTEs.
	 */
	/* Allocated by linker script; must be at the end of .textdata */

	/* Storage for the recorded physical address of the PML4
	 * (written by init_pages below).  NOTE(review): the pml4
	 * label itself is not visible in this excerpt.
	 */
	.section ".bss.pml4", "aw", @nobits
/****************************************************************************
 * init_pages (protected-mode near call)
 *
 * Initialise the page tables ready for long mode.
 *
 * Parameters:
 *   %edi : virt_offset (offset to add to a virtual address to
 *          obtain its physical address)
 *
 * NOTE(review): the loop-back branches after each "1:" loop body,
 * and the final return, are not visible in this excerpt.
 ****************************************************************************
 */
	.section ".text.init_pages", "ax", @progbits
	/* Initialise PML4Es for low 4GB and negative 2GB
	 *
	 * leal adds virt_offset to VIRTUAL(pdpte) to produce the
	 * physical address of the PDPT; the PG_* flag bits are folded
	 * into the low bits of the (page-aligned) address by the same
	 * addition.  The entry is written to both PML4E[0x000] and
	 * PML4E[0x1ff] (the last PTE-sized slot of the page).
	 */
	leal	( VIRTUAL(pdpte) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
	movl	%eax, VIRTUAL(pml4e)
	movl	%eax, ( VIRTUAL(pml4e) + SIZEOF_PT - SIZEOF_PTE )

	/* Initialise PDPTE for negative 1GB: PDPTE[0x1ff] points back
	 * at the PDPT itself (%eax still holds that entry value).
	 */
	movl	%eax, ( VIRTUAL(pdpte) + SIZEOF_PT - SIZEOF_PTE )

	/* Initialise PDPTE for I/O space: PDPTE[PDE_LOW_PTS] points
	 * back at the PML4, which doubles as the I/O 2MB-page table.
	 */
	leal	( VIRTUAL(pml4e) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
	movl	%eax, ( VIRTUAL(pdpte) + ( PDE_LOW_PTS * SIZEOF_PTE ) )

	/* Initialise PDPTEs for low 4GB
	 *
	 * %ecx counts down from PDE_LOW_PTS to 1, filling entries from
	 * the last page directory to the first; %eax starts one full
	 * page table beyond pde_low and is stepped back by SIZEOF_PT
	 * each iteration.
	 */
	movl	$PDE_LOW_PTS, %ecx
	leal	( VIRTUAL(pde_low) + ( PDE_LOW_PTS * SIZEOF_PT ) + \
		  ( PG_P | PG_RW | PG_US ) )(%edi), %eax
1:	subl	$SIZEOF_PT, %eax
	movl	%eax, ( VIRTUAL(pdpte) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE)

	/* Initialise PDEs for low 4GB (2MB identity-mapped pages)
	 *
	 * The "0" in the leal is SIZEOF_LOW_4GB truncated to 32 bits
	 * (1 << 32 == 0 mod 2^32): the first subl wraps %eax around to
	 * ( 4GB - 2MB ) plus the flag bits, so the countdown writes
	 * page addresses 4GB-2MB, 4GB-4MB, ..., 2MB, 0.
	 */
	movl	$PDE_LOW_PTES, %ecx
	leal	( 0 + ( PG_P | PG_RW | PG_US | PG_PS ) ), %eax
1:	subl	$SIZEOF_2MB_PAGE, %eax
	movl	%eax, ( VIRTUAL(pde_low) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE)

	/* Initialise PDEs for .textdata
	 *
	 * %ebx is the byte offset of _textdata's PDE within its page
	 * table: shifting the virtual address right by
	 * ( SIZEOF_2MB_PAGE_LOG2 - SIZEOF_PTE_LOG2 ) == 21 - 3 == 18
	 * combines ">> 21" (PDE index) with "<< 3" (index to byte
	 * offset), and the mask keeps the offset within one page table.
	 * %eax counts backwards from _etextdata in SIZEOF_PT steps,
	 * one page table per PDE.
	 */
	movl	$_textdata_pdes, %ecx
	leal	( VIRTUAL(_etextdata) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
	movl	$VIRTUAL(_textdata), %ebx
	shrl	$( SIZEOF_2MB_PAGE_LOG2 - SIZEOF_PTE_LOG2 ), %ebx
	andl	$( SIZEOF_PT - 1 ), %ebx
1:	subl	$SIZEOF_PT, %eax
	movl	%eax, (VIRTUAL(pde_textdata) - SIZEOF_PTE)(%ebx,%ecx,SIZEOF_PTE)

	/* Initialise PTEs for .textdata (4kB pages)
	 *
	 * %eax starts just past the end of the paged portion of
	 * .textdata and steps back one 4kB page per entry while %ecx
	 * counts the PTEs down.
	 */
	movl	$_textdata_ptes, %ecx
	leal	( VIRTUAL(_textdata) + ( PG_P | PG_RW | PG_US ) )(%edi), %eax
	addl	$_textdata_paged_len, %eax
1:	subl	$SIZEOF_4KB_PAGE, %eax
	movl	%eax, ( VIRTUAL(pte_textdata) - SIZEOF_PTE )(,%ecx,SIZEOF_PTE)

	/* Record PML4 physical address (virtual address + virt_offset)
	 * for later loading into %cr3.  NOTE(review): the %cr3 load is
	 * not visible in this excerpt — confirm against the long-mode
	 * transition code.
	 */
	leal	VIRTUAL(pml4e)(%edi), %eax
	movl	%eax, VIRTUAL(pml4)