--- /dev/null
+XEN_ROOT=../../../..
+override XEN_TARGET_ARCH=x86_32
+CFLAGS =
+include $(XEN_ROOT)/Config.mk
+
+# Disable PIE/SSP if GCC supports them. They can break us.
+$(call cc-option-add,CFLAGS,CC,-nopie)
+$(call cc-option-add,CFLAGS,CC,-fno-stack-protector)
+$(call cc-option-add,CFLAGS,CC,-fno-stack-protector-all)
+
+CFLAGS += -Werror -fno-builtin -msoft-float
+
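+# Build chain for this standalone 32-bit boot code: compile, link flat at
+# 0x8c000, strip to a raw binary with objcopy, then dump it as .long
+# directives for inclusion by head.S.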
+%.S: %.bin
+ (od -v -t x $< | head -n -1 | \
+ sed 's/ /,0x/g' | sed 's/^[0-9]*,/ .long /') >$@
+
+%.bin: %.lnk
+ $(OBJCOPY) -O binary $< $@
+
+%.lnk: %.o
+ $(LD) $(LDFLAGS_DIRECT) -N -Ttext 0x8c000 -o $@ $<
+
+%.o: %.c
+ $(CC) $(CFLAGS) -c $< -o $@
cmp $0x2BADB002,%eax
jne not_multiboot
- /* Save the Multiboot info structure for later use. */
- mov %ebx,sym_phys(multiboot_ptr)
+ /* Save the Multiboot info struct (after relocation) for later use. */
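+ /* Temporary stack (1kB into cpu0_stack) from which to call reloc(). */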
+ mov $sym_phys(cpu0_stack)+1024,%esp
+ push %ebx
+ call reloc
+ mov %eax,sym_phys(multiboot_ptr)
/* Initialize BSS (no nasty surprises!) */
mov $sym_phys(__bss_start),%edi
#include "cmdline.S"
+reloc:
+#include "reloc.S"
+
.align 16
.globl trampoline_start, trampoline_end
trampoline_start:
--- /dev/null
+/******************************************************************************
+ * reloc.c
+ *
+ * 32-bit flat memory-map routines for relocating Multiboot structures
+ * and modules. This is most easily done early with paging disabled.
+ *
+ * Copyright (c) 2009, Citrix Systems, Inc.
+ *
+ * Authors:
+ * Keir Fraser <keir.fraser@citrix.com>
+ */
+
+asm (
+    "    .text                         \n"
+    "    .globl _start                 \n"
+    "_start:                           \n"
+    /* Copy this blob (text+data) from wherever it is currently executing
+     * (%esi, discovered via call/pop) to its link address (%edi = _start). */
+    "    mov  $_start,%edi             \n"
+    "    call 1f                       \n"
+    "1:  pop  %esi                     \n"
+    "    sub  $1b-_start,%esi          \n"
+    "    mov  $__bss_start-_start,%ecx \n"
+    "    rep  movsb                    \n"
+    /* Zero the BSS: %edi now points at the linked __bss_start. */
+    "    xor  %eax,%eax                \n"
+    "    mov  $_end,%ecx               \n"
+    "    sub  %edi,%ecx                \n"
+    "    rep  stosb                    \n"
+    /* Jump to reloc() at its link address. The caller's stack, with the
+     * pushed multiboot pointer and return address, is untouched, so
+     * reloc()'s ret goes straight back to head.S. */
+    "    mov  $reloc,%eax              \n"
+    "    jmp  *%eax                    \n"
+    );
+
+typedef unsigned int u32;
+#include "../../../include/xen/multiboot.h"
+
+extern char _start[];
+
+static void *memcpy(void *dest, const void *src, unsigned int n)
+{
+    char *s = (char *)src, *d = dest;
+    while ( n-- )
+        *d++ = *s++;
+    return dest;
+}
+
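+/*
+ * Bump allocator: each call copies 'bytes' of data to a 16-byte-aligned
+ * slot allocated downwards from this blob's own image (_start).
+ */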
+static void *reloc_mbi_struct(void *old, unsigned int bytes)
+{
+    static void *alloc = &_start;
+    alloc = (void *)(((unsigned long)alloc - bytes) & ~15ul);
+    return memcpy(alloc, old, bytes);
+}
+
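+/* Relocate a NUL-terminated string, including its terminator. */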
+static char *reloc_mbi_string(char *old)
+{
+    char *p;
+    for ( p = old; *p != '\0'; p++ )
+        continue;
+    return reloc_mbi_struct(old, p - old + 1);
+}
+
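+/*
+ * Called from head.S with the bootloader's multiboot_info pointer;
+ * returns the address of the relocated copy.
+ */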
+multiboot_info_t *reloc(multiboot_info_t *mbi_old)
+{
+    multiboot_info_t *mbi = reloc_mbi_struct(mbi_old, sizeof(*mbi));
+    int i;
+
+    if ( mbi->flags & MBI_CMDLINE )
+        mbi->cmdline = (u32)reloc_mbi_string((char *)mbi->cmdline);
+
+    if ( mbi->flags & MBI_MODULES )
+    {
+        module_t *mods = reloc_mbi_struct(
+            (module_t *)mbi->mods_addr, mbi->mods_count * sizeof(module_t));
+        mbi->mods_addr = (u32)mods;
+        for ( i = 0; i < mbi->mods_count; i++ )
+            if ( mods[i].string )
+                mods[i].string = (u32)reloc_mbi_string((char *)mods[i].string);
+    }
+
+    if ( mbi->flags & MBI_MEMMAP )
+        mbi->mmap_addr = (u32)reloc_mbi_struct(
+            (memory_map_t *)mbi->mmap_addr, mbi->mmap_length);
+
+    /* Mask features we don't understand or don't relocate. */
+    mbi->flags &= (MBI_MEMLIMITS |
+                   MBI_DRIVES |
+                   MBI_CMDLINE |
+                   MBI_MODULES |
+                   MBI_MEMMAP);
+
+    return mbi;
+}