Index: head-2006-02-16/arch/x86_64/kernel/head-xen.S
===================================================================
--- head-2006-02-16.orig/arch/x86_64/kernel/head-xen.S	2006-02-17 11:01:42.000000000 +0100
+++ head-2006-02-16/arch/x86_64/kernel/head-xen.S	2006-02-17 11:01:24.000000000 +0100
@@ -14,15 +14,6 @@
 #include
-
-.section __xen_guest
-	.ascii	"GUEST_OS=linux,GUEST_VER=2.6"
-	.ascii	",XEN_VER=xen-3.0"
-	.ascii	",VIRT_BASE=0xffffffff80000000"
-	.ascii	",HYPERCALL_PAGE=0x10a"	/* __pa(hypercall_page) >> 12 */
-	.ascii	",LOADER=generic"
-	.byte	0
-
 #include
 #include
 #include
@@ -30,63 +21,67 @@
 #include
 #include
 #include
-
-/* we are not able to switch in one step to the final KERNEL ADRESS SPACE
- * because we need identity-mapped pages on setup so define __START_KERNEL to
- * 0x100000 for this stage
- *
- */
+
+.macro utoh value, lower=1
+	.if (\value) < 0 || (\value) >= 0x10
+		utoh (((\value)>>4)&0x0fffffffffffffff), (\lower)
+	.elseif (\value) >= 0x10
+		utoh ((\value)>>4), (\lower)
+	.endif
+	.if ((\value) & 0xf) < 10
+		.byte '0' + ((\value) & 0xf)
+	.elseif (\lower)
+		.byte 'a' + ((\value) & 0xf) - 10
+	.else
+		.byte 'A' + ((\value) & 0xf) - 10
+	.endif
+.endm
+
+#define HYPERCALL_PAGE_OFFSET 0x7000
+
+.section __xen_guest
+	.ascii	"GUEST_OS=linux,GUEST_VER=2.6"
+	.ascii	",XEN_VER=xen-3.0"
+	.ascii	",VIRT_BASE=0x"; utoh __START_KERNEL_map
+	.ascii	",HYPERCALL_PAGE=0x"; utoh ((__PHYSICAL_START+HYPERCALL_PAGE_OFFSET)>>PAGE_SHIFT)
+	.ascii	",LOADER=generic"
+	.byte	0
 
 	.text
 	.code64
 	.globl startup_64
 startup_64:
 ENTRY(_start)
-	movq %rsi,xen_start_info(%rip)
-
-#ifdef CONFIG_SMP
-ENTRY(startup_64_smp)
-#endif	/* CONFIG_SMP */
-
-	cld
-
-	movq init_rsp(%rip),%rsp
+	movq $(init_thread_union+THREAD_SIZE-8),%rsp
 
 	/* zero EFLAGS after setting rsp */
 	pushq $0
 	popfq
 
-	movq initial_code(%rip),%rax
-	jmp *%rax
-
-	/* SMP bootup changes these two */
-	.globl initial_code
-initial_code:
-	.quad	x86_64_start_kernel
-	.globl init_rsp
-init_rsp:
-	.quad  init_thread_union+THREAD_SIZE-8
+	/* rsi is pointer to startup info structure.
+	   pass it to C */
+	movq %rsi,%rdi
+	jmp x86_64_start_kernel
 
+#ifndef CONFIG_XEN
ENTRY(early_idt_handler)
+	cmpl $2,early_recursion_flag(%rip)
+	jz 1f
+	incl early_recursion_flag(%rip)
 	xorl %eax,%eax
 	movq 8(%rsp),%rsi	# get rip
 	movq (%rsp),%rdx
+	movq %cr2,%rcx
 	leaq early_idt_msg(%rip),%rdi
-1:	hlt			# generate #GP
+	call early_printk
+	cmpl $2,early_recursion_flag(%rip)
+	jz 1f
+	call dump_stack
+1:	hlt
 	jmp 1b
+early_recursion_flag:
+	.long 0
 
 early_idt_msg:
 	.asciz "PANIC: early exception rip %lx error %lx cr2 %lx\n"
-
-#if 0
-ENTRY(lgdt_finish)
-	movl $(__USER_DS),%eax		# DS/ES contains default USER segment
-	movw %ax,%ds
-	movw %ax,%es
-	movl $(__KERNEL_DS),%eax
-	movw %ax,%ss			# after changing gdt.
-	popq %rax			# get the retrun address
-	pushq $(__KERNEL_CS)
-	pushq %rax
-	lretq
 #endif
 
 ENTRY(stext)
@@ -96,13 +91,17 @@ ENTRY(_stext)
 #define NEXT_PAGE(name) \
 	$page = $page + 1; \
 	.org $page * 0x1000; \
-	phys_/**/name = $page * 0x1000 + __PHYSICAL_START; \
 ENTRY(name)
 
 NEXT_PAGE(init_level4_pgt)
 	/* This gets initialized in x86_64_start_kernel */
 	.fill	512,8,0
 
+#ifndef CONFIG_XEN
+NEXT_PAGE(level3_ident_pgt)
+	.quad	phys_level2_ident_pgt | 0x007
+	.fill	511,8,0
+#else
 /*
  * We update two pgd entries to make kernel and user pgd consistent
  * at pgd_populate(). It can be used for kernel modules. So we place
@@ -112,16 +111,19 @@ NEXT_PAGE(init_level4_pgt)
  */
 NEXT_PAGE(init_level4_user_pgt)
 	.fill	512,8,0
+#endif
 
-	/*
-	 * In Xen the following pre-initialized pgt entries are re-initialized.
-	 */
 NEXT_PAGE(level3_kernel_pgt)
+#ifndef CONFIG_XEN
 	.fill	510,8,0 /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
 	.quad	phys_level2_kernel_pgt | 0x007
 	.fill	1,8,0
+#else
+	.fill	512,8,0
+#endif
 
+#ifndef CONFIG_XEN
 NEXT_PAGE(level2_ident_pgt)
 	/* 40MB for bootup. */
 	i = 0
@@ -133,8 +135,17 @@ NEXT_PAGE(level2_ident_pgt)
 	.globl temp_boot_pmds
 temp_boot_pmds:
 	.fill	492,8,0
+#else
+	/*
+	 * This is used for vsyscall area mapping as we have a different
+	 * level4 page table for user.
+	 */
+NEXT_PAGE(level3_user_pgt)
+	.fill	512,8,0
+#endif
 
 NEXT_PAGE(level2_kernel_pgt)
+#ifndef CONFIG_XEN
 	/* 40MB kernel mapping. The kernel code cannot be bigger than that.
 	   When you change this change KERNEL_TEXT_SIZE in page.h too. */
 	/* (2^48-(2*1024*1024*1024)-((2^39)*511)-((2^30)*510)) = 0 */
@@ -145,54 +156,24 @@ NEXT_PAGE(level2_kernel_pgt)
 	.endr
 	/* Module mapping starts here */
 	.fill	492,8,0
-
-	/*
-	 * This is used for vsyscall area mapping as we have a different
-	 * level4 page table for user.
-	 */
-NEXT_PAGE(level3_user_pgt)
-	.fill	512,8,0
-
-NEXT_PAGE(cpu_gdt_table)
-/* The TLS descriptors are currently at a different place compared to i386.
-   Hopefully nobody expects them at a fixed place (Wine?) */
-	.quad	0x0000000000000000	/* NULL descriptor */
-	.quad	0x0			/* unused */
-	.quad	0x00affa000000ffff	/* __KERNEL_CS */
-	.quad	0x00cff2000000ffff	/* __KERNEL_DS */
-	.quad	0x00cffa000000ffff	/* __USER32_CS */
-	.quad	0x00cff2000000ffff	/* __USER_DS, __USER32_DS */
-	.quad	0x00affa000000ffff	/* __USER_CS */
-	.quad	0x00cffa000000ffff	/* __KERNEL32_CS */
-	.quad	0,0			/* TSS */
-	.quad	0,0			/* LDT */
-	.quad	0,0,0			/* three TLS descriptors */
-	.quad	0			/* unused */
-gdt_end:
-#if 0
-	/* asm/segment.h:GDT_ENTRIES must match this */
-	/* This should be a multiple of the cache line size */
-	/* GDTs of other CPUs are now dynamically allocated */
-
-	/* zero the remaining page */
-	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
+#else
+	.fill	512,8,0
 #endif
 
 NEXT_PAGE(empty_zero_page)
+	.skip PAGE_SIZE
 
+#ifndef CONFIG_XEN
 NEXT_PAGE(level3_physmem_pgt)
 	.quad	phys_level2_kernel_pgt | 0x007	/* so that __va works even before pagetable_init */
-
-NEXT_PAGE(hypercall_page)
-.if (phys_hypercall_page - 0x10a000)
-	/* cause compiler error if the hypercall_page is at a
-	 * different address than expected. */
-	.quad	__adjust_hypercall_page_in_header
-.endif
-	.fill	512,8,0
+#endif
 
 #undef NEXT_PAGE
 
+.org HYPERCALL_PAGE_OFFSET
+ENTRY(hypercall_page)
+	.fill	512,8,0
+
 	.data
 
 #ifndef CONFIG_XEN
@@ -246,8 +227,35 @@ gdt:
 	 * Also sysret mandates a special GDT layout
 	 */
 
-.align PAGE_SIZE
+	.section .data.page_aligned, "aw"
+	.align PAGE_SIZE
+
+/* The TLS descriptors are currently at a different place compared to i386.
+   Hopefully nobody expects them at a fixed place (Wine?) */
+
+ENTRY(cpu_gdt_table)
+	.quad	0x0000000000000000	/* NULL descriptor */
+	.quad	0x0			/* unused */
+	.quad	0x00affa000000ffff	/* __KERNEL_CS */
+	.quad	0x00cff2000000ffff	/* __KERNEL_DS */
+	.quad	0x00cffa000000ffff	/* __USER32_CS */
+	.quad	0x00cff2000000ffff	/* __USER_DS, __USER32_DS */
+	.quad	0x00affa000000ffff	/* __USER_CS */
+	.quad	0x00cffa000000ffff	/* __KERNEL32_CS */
+	.quad	0,0			/* TSS */
+	.quad	0,0			/* LDT */
+	.quad	0,0,0			/* three TLS descriptors */
+	.quad	0			/* unused */
+gdt_end:
+	/* asm/segment.h:GDT_ENTRIES must match this */
+	/* This should be a multiple of the cache line size */
+	/* GDTs of other CPUs are now dynamically allocated */
+
+	/* zero the remaining page */
+	.fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
+
+	.section .bss, "aw", @nobits
+	.align L1_CACHE_BYTES
 ENTRY(idt_table)
 	.rept 256
 	.quad 0
Index: head-2006-02-16/arch/x86_64/kernel/head64-xen.c
===================================================================
--- head-2006-02-16.orig/arch/x86_64/kernel/head64-xen.c	2006-02-17 11:01:42.000000000 +0100
+++ head-2006-02-16/arch/x86_64/kernel/head64-xen.c	2006-02-15 15:32:50.000000000 +0100
@@ -45,9 +45,9 @@ static void __init clear_bss(void)
 
 extern char saved_command_line[];
 
+#ifndef CONFIG_XEN
 static void __init copy_bootdata(char *real_mode_data)
 {
-#if 0
 	int new_data;
 	char * command_line;
 
@@ -64,16 +64,19 @@ static void __init copy_bootdata(char *r
 	command_line = (char *) ((u64)(new_data));
 	memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
 	printk("Bootdata ok (command line is %s)\n", saved_command_line);
+}
 #else
+static void __init copy_bootdata(struct start_info *start_info)
+{
 	int max_cmdline;
 
 	if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
 		max_cmdline = COMMAND_LINE_SIZE;
-	memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
+	memcpy(saved_command_line, start_info->cmd_line, max_cmdline);
 	saved_command_line[max_cmdline-1] = '\0';
 	printk("Bootdata ok (command line is %s)\n", saved_command_line);
-#endif
 }
+#endif
 
 static void __init setup_boot_cpu_data(void)
 {
@@ -93,21 +96,22 @@ static void __init setup_boot_cpu_data(v
 	boot_cpu_data.x86_mask = eax & 0xf;
 }
 
-void __init x86_64_start_kernel(char * real_mode_data)
+void __init x86_64_start_kernel(struct start_info *start_info)
 {
 	char *s;
 	int i;
 
+	xen_start_info = start_info;
 	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 		phys_to_machine_mapping =
-			(unsigned long *)xen_start_info->mfn_list;
+			(unsigned long *)start_info->mfn_list;
 		start_pfn = (__pa(xen_start_info->pt_base) >> PAGE_SHIFT) +
 			xen_start_info->nr_pt_frames;
 	}
 
+#if 0
 	for (i = 0; i < 256; i++)
 		set_intr_gate(i, early_idt_handler);
-#if 0
 	asm volatile("lidt %0" :: "m" (idt_descr));
 #endif
 
@@ -115,7 +119,7 @@ void __init x86_64_start_kernel(char * r
 		cpu_pda(i) = &boot_cpu_pda[i];
 
 	pda_init(0);
-	copy_bootdata(real_mode_data);
+	copy_bootdata(start_info);
 #ifdef CONFIG_SMP
 	cpu_set(0, cpu_online_map);
 #endif
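
A note on the hex-emitting helper: the `utoh' assembler macro added to head-xen.S builds the VIRT_BASE= and HYPERCALL_PAGE= strings of the __xen_guest section by recursing on the upper nibbles first and then emitting the lowest nibble as an ASCII digit. The small C program below is an illustrative sketch only, not part of the patch; it models the same conversion. The macro's extra "< 0" test and 0x0fffffffffffffff mask are only needed because gas expression arithmetic is signed, which an unsigned C type avoids.

	/* Illustrative sketch of the utoh recursion -- not part of the patch. */
	#include <stdio.h>

	static void utoh(unsigned long long value, int lower)
	{
		unsigned int nibble = value & 0xf;	/* lowest hex digit at this level */

		if (value >= 0x10)			/* higher-order digits remain: print them first */
			utoh(value >> 4, lower);

		if (nibble < 10)
			putchar('0' + nibble);
		else
			putchar((lower ? 'a' : 'A') + nibble - 10);
	}

	int main(void)
	{
		printf("VIRT_BASE=0x");
		utoh(0xffffffff80000000ULL, 1);		/* the VIRT_BASE value the old header hard-coded */
		putchar('\n');
		return 0;
	}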