-----BEGIN PGP SIGNED MESSAGE----- Hash: SHA1 Hi,
I have taken memtest reloc.c and glued it into libpayload. Check attached patch.
It adds -fPIC too.
Then I took the tint payload and made it dynamic via:
../libpayload/bin/lpgcc -shared -o tint.elf tint.o engine.o io.o utils.o
I added -shared to following rule
$(TARGET).elf: $(OBJS)
$(CC) -shared -o $@ $(OBJS)
And -fPIC to CFLAGS
I used Qemu to test this. And it does start tint! Then I changed the loading
address with the attached simple patch coreboot_change_base.patch and it STILL
works! I think I had more luck than I thought.
(Except the stack, I cheated and created the temp 4K stack, but I think this can
be fixed quite easily)
Questions:
1) Does it really work? I can't believe it.
2) If yes I think we will need to ask Eric to re-license this for libpayload
3) I think we can use this to make coreboot_ram to run on ANY address :) if
someone manages to add -fPIC to our build system. When the coreboot_ram is
created one needs to add -shared too.
Rudolf
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.10 (GNU/Linux)
Comment: Using GnuPG with Mozilla - http://enigmail.mozdev.org
iEYEARECAAYFAkuFvm4ACgkQ3J9wPJqZRNWrFQCfddjeN0irx6eljQYIBSdYodkf
Rm0An2DlZGK7MG+6vqH+APlVKLHLAwzT
=7Nyh
-----END PGP SIGNATURE-----
Index: lib/libpayload.ldscript
===================================================================
--- lib/libpayload.ldscript (revision 5158)
+++ lib/libpayload.ldscript (working copy)
@@ -27,7 +27,7 @@
* SUCH DAMAGE.
*/
-BASE_ADDRESS = 0x100000;
+BASE_ADDRESS = 0x0;
OUTPUT_FORMAT(elf32-i386)
OUTPUT_ARCH(i386)
@@ -41,36 +41,50 @@
{
. = BASE_ADDRESS;
- . = ALIGN(16);
- _start = .;
-
.text : {
- *(.text._entry)
+ _start = .;
*(.text)
*(.text.*)
- }
-
+ *(.plt)
+ _etext = . ;
+ } = 0x9090
.rodata : {
*(.rodata)
*(.rodata.*)
}
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .hash : { *(.hash) }
+ .gnu.hash : { *(.gnu.hash) }
+ .dynamic : { *(.dynamic) }
+ .rel.text : { *(.rel.text .rel.text.*) }
+ .rel.rodata : { *(.rel.rodata .rel.rodata.*) }
+ .rel.data : { *(.rel.data .rel.data.*) }
+ .rel.got : { *(.rel.got .rel.got.*) }
+ .rel.plt : { *(.rel.plt .rel.plt.*) }
+
+ . = ALIGN(4);
.data : {
- *(.data)
- *(.data.*)
+ _data = .;
+ *(.data)
+ *(.data.*)
}
+ .got : {
+ *(.got.plt)
+ *(.got)
+ _edata = . ;
+ }
+ . = ALIGN(4);
+ .bss : {
+ _bss = .;
+ *(.dynbss)
+ *(.bss)
+ *(.bss.*)
+ *(COMMON)
+ /* _end must be at least 256 byte aligned */
+ . = ALIGN(256);
- _edata = .;
-
- .bss : {
- *(.sbss)
- *(.sbss.*)
- *(.bss)
- *(.bss.*)
- *(COMMON)
-
- /* Stack and heap */
-
. = ALIGN(16);
_heap = .;
. += HEAP_SIZE;
@@ -81,9 +95,9 @@
. += STACK_SIZE;
. = ALIGN(16);
_stack = .;
- }
- _end = .;
- /DISCARD/ : { *(.comment) }
+ _end = .;
+ }
+ /DISCARD/ : { *(*) }
}
Index: Makefile
===================================================================
--- Makefile (revision 5158)
+++ Makefile (working copy)
@@ -101,7 +101,7 @@
STACKPROTECT += $(call cc-option, -fno-stack-protector,)
# TODO: Re-add -Os as soon as we find out why it caused problems.
-CFLAGS := -Wall -Werror $(STACKPROTECT) -nostdinc $(INCLUDES) -ffreestanding
+CFLAGS := -Wall -Werror $(STACKPROTECT) -nostdinc $(INCLUDES) -ffreestanding -fPIC
all: lib
Index: arch/i386/head.S
===================================================================
--- arch/i386/head.S (revision 5158)
+++ arch/i386/head.S (working copy)
@@ -38,6 +38,8 @@
* change anything.
*/
_entry:
+ jmp 0f
+
call _init
/* We're back - go back to the bootloader. */
@@ -62,22 +64,42 @@
* This function saves off the previous stack and switches us to our
* own execution environment.
*/
+0:
_init:
/* No interrupts, please. */
cli
+ /* Ensure I have a boot_stack pointer */
+ testl %esp, %esp
+ jnz 0f
+ movl $(0x1000 + _GLOBAL_OFFSET_TABLE_), %esp
+ leal boot_stack_top@GOTOFF(%esp), %esp
+0:
+
+ /* Load the GOT pointer */
+ call 0f
+0: popl %ebx
+ addl $_GLOBAL_OFFSET_TABLE_+[.-0b], %ebx
+
+ /* Pick the appropriate boot_stack address */
+ leal boot_stack_top@GOTOFF(%ebx), %esp
+
/* Store current stack pointer. */
movl %esp, %esi
/* Store EAX and EBX */
- movl %eax,loader_eax
- movl %ebx,loader_ebx
+ movl %eax,loader_eax@GOTOFF(%ebx)
+ movl %ebx,loader_ebx@GOTOFF(%ebx)
+ leal _dl_start@GOTOFF(%ebx), %eax
+ call *%eax
+
/* Setup new stack. */
- movl $_stack, %ebx
+/* movl $_stack, %ebx
movl %ebx, %esp
+*/
/* Save old stack pointer. */
pushl %esi
@@ -95,3 +117,11 @@
/* Return to the original context. */
ret
+.bss
+.balign 16
+boot_stack:
+ .globl boot_stack
+ . = . + 4096
+boot_stack_top:
+ .globl boot_stack_top
+.previous
Index: arch/i386/Makefile.inc
===================================================================
--- arch/i386/Makefile.inc (revision 5158)
+++ arch/i386/Makefile.inc (working copy)
@@ -29,7 +29,7 @@
TARGETS-y += arch/i386/head.S.o arch/i386/main.o arch/i386/sysinfo.o
TARGETS-y += arch/i386/timer.o arch/i386/coreboot.o arch/i386/util.S.o
-TARGETS-y += arch/i386/exec.S.o arch/i386/virtual.o
+TARGETS-y += arch/i386/exec.S.o arch/i386/virtual.o arch/i386/reloc.o
# Multiboot support is configurable
TARGETS-$(CONFIG_MULTIBOOT) += arch/i386/multiboot.o
Index: arch/i386/reloc.c
===================================================================
--- arch/i386/reloc.c (revision 0)
+++ arch/i386/reloc.c (revision 0)
@@ -0,0 +1,268 @@
+/* reloc.c - MemTest-86 Version 3.3
+ *
+ * Released under version 2 of the Gnu Public License.
+ * By Eric Biederman
+ */
+
+//#include "stddef.h"
+//#include "stdint.h"
+#include <libpayload.h>
+#include "elf.h"
+
+#define __ELF_NATIVE_CLASS 32
+#define ELF_MACHINE_NO_RELA 1
+
+/* We use this macro to refer to ELF types independent of the native wordsize.
+ `ElfW(TYPE)' is used in place of `Elf32_TYPE' or `Elf64_TYPE'. */
+
+#define ElfW(type) _ElfW (Elf, __ELF_NATIVE_CLASS, type)
+#define _ElfW(e,w,t) _ElfW_1 (e, w, _##t)
+#define _ElfW_1(e,w,t) e##w##t
+/* We use this macro to refer to ELF types independent of the native wordsize.
+ `ElfW(TYPE)' is used in place of `Elf32_TYPE' or `Elf64_TYPE'. */
+#define ELFW(type) _ElfW (ELF, __ELF_NATIVE_CLASS, type)
+
+#define assert(expr) ((void) 0)
+
+ /* This #define produces dynamic linking inline functions for
+ bootstrap relocation instead of general-purpose relocation. */
+#define RTLD_BOOTSTRAP
+
+struct link_map
+{
+ ElfW(Addr) l_addr; /* Current load address */
+ ElfW(Addr) ll_addr; /* Last load address */
+ ElfW(Dyn) *l_ld;
+ /* Indexed pointers to dynamic section.
+ [0,DT_NUM) are indexed by the processor-independent tags.
+ [DT_NUM,DT_NUM+DT_PROCNUM) are indexed by the tag minus DT_LOPROC.
+ [DT_NUM+DT_PROCNUM,DT_NUM+DT_PROCNUM+DT_EXTRANUM) are indexed
+ by DT_EXTRATAGIDX(tagvalue) and
+ [DT_NUM+DT_PROCNUM,
+ DT_NUM+DT_PROCNUM+DT_EXTRANUM)
+ are indexed by DT_EXTRATAGIDX(tagvalue) (see <elf.h>). */
+
+ ElfW(Dyn) *l_info[DT_NUM + DT_PROCNUM + DT_EXTRANUM];
+};
+
+
+/* Return the link-time address of _DYNAMIC. Conveniently, this is the
+ first element of the GOT. This must be inlined in a function which
+ uses global data. */
+static inline Elf32_Addr __attribute__ ((unused))
+elf_machine_dynamic (void)
+{
+ register Elf32_Addr *got asm ("%ebx");
+ return *got;
+}
+
+/* Return the run-time load address of the shared object. */
+static inline Elf32_Addr __attribute__ ((unused))
+elf_machine_load_address (void)
+{
+ Elf32_Addr addr;
+ asm volatile ("leal _start@GOTOFF(%%ebx), %0\n"
+ : "=r" (addr) : : "cc");
+ return addr;
+}
+
+/* Perform the relocation specified by RELOC and SYM (which is fully resolved).
+ MAP is the object containing the reloc. */
+static inline void
+elf_machine_rel (struct link_map *map, const Elf32_Rel *reloc,
+ const Elf32_Sym *sym, Elf32_Addr *const reloc_addr)
+{
+ Elf32_Addr ls_addr, s_addr;
+ Elf32_Addr value;
+ if (ELF32_R_TYPE (reloc->r_info) == R_386_RELATIVE)
+ {
+ *reloc_addr += map->l_addr - map->ll_addr;
+ return;
+ }
+ if (ELF32_R_TYPE(reloc->r_info) == R_386_NONE) {
+ return;
+ }
+ value = sym->st_value;
+ /* Every section except the undefined section has a base of map->l_addr */
+ ls_addr = sym->st_shndx == SHN_UNDEF ? 0 : map->ll_addr;
+ s_addr = sym->st_shndx == SHN_UNDEF ? 0 : map->l_addr;
+
+ switch (ELF32_R_TYPE (reloc->r_info))
+ {
+ case R_386_COPY:
+ {
+ /* Roll memcpy by hand as we don't have function calls yet. */
+ unsigned char *dest, *src;
+ long i;
+ dest = (unsigned char *)reloc_addr;
+ src = (unsigned char *)(value + s_addr);
+ for(i = 0; i < sym->st_size; i++) {
+ dest[i] = src[i];
+ }
+ }
+ break;
+ case R_386_GLOB_DAT:
+ *reloc_addr = s_addr + value;
+ break;
+ case R_386_JMP_SLOT:
+ *reloc_addr = s_addr + value;
+ break;
+ case R_386_32:
+ if (map->ll_addr == 0) {
+ *reloc_addr += value;
+ }
+ *reloc_addr += s_addr - ls_addr;
+ break;
+ case R_386_PC32:
+ if (map->ll_addr == 0) {
+ *reloc_addr += value - reloc->r_offset;
+ }
+ *reloc_addr += (s_addr - map->l_addr) - (ls_addr - map->ll_addr);
+ break;
+ default:
+ assert (! "unexpected dynamic reloc type");
+ break;
+ }
+}
+
+/* Read the dynamic section at DYN and fill in INFO with indices DT_*. */
+
+static inline void __attribute__ ((unused))
+elf_get_dynamic_info(ElfW(Dyn) *dyn, ElfW(Addr) l_addr,
+ ElfW(Dyn) *info[DT_NUM + DT_PROCNUM + DT_EXTRANUM])
+{
+ if (! dyn)
+ return;
+
+ while (dyn->d_tag != DT_NULL)
+ {
+ if (dyn->d_tag < DT_NUM)
+ info[dyn->d_tag] = dyn;
+ else if (dyn->d_tag >= DT_LOPROC &&
+ dyn->d_tag < DT_LOPROC + DT_PROCNUM)
+ info[dyn->d_tag - DT_LOPROC + DT_NUM] = dyn;
+ else if ((Elf32_Word) DT_EXTRATAGIDX (dyn->d_tag) < DT_EXTRANUM)
+ info[DT_EXTRATAGIDX (dyn->d_tag) + DT_NUM + DT_PROCNUM
+ ] = dyn;
+ else
+ assert (! "bad dynamic tag");
+ ++dyn;
+ }
+
+ if (info[DT_PLTGOT] != NULL)
+ info[DT_PLTGOT]->d_un.d_ptr += l_addr;
+ if (info[DT_STRTAB] != NULL)
+ info[DT_STRTAB]->d_un.d_ptr += l_addr;
+ if (info[DT_SYMTAB] != NULL)
+ info[DT_SYMTAB]->d_un.d_ptr += l_addr;
+#if ! ELF_MACHINE_NO_RELA
+ if (info[DT_RELA] != NULL)
+ {
+ assert (info[DT_RELAENT]->d_un.d_val == sizeof (ElfW(Rela)));
+ info[DT_RELA]->d_un.d_ptr += l_addr;
+ }
+#endif
+#if ! ELF_MACHINE_NO_REL
+ if (info[DT_REL] != NULL)
+ {
+ assert (info[DT_RELENT]->d_un.d_val == sizeof (ElfW(Rel)));
+ info[DT_REL]->d_un.d_ptr += l_addr;
+ }
+#endif
+ if (info[DT_PLTREL] != NULL)
+ {
+#if ELF_MACHINE_NO_RELA
+ assert (info[DT_PLTREL]->d_un.d_val == DT_REL);
+#elif ELF_MACHINE_NO_REL
+ assert (info[DT_PLTREL]->d_un.d_val == DT_RELA);
+#else
+ assert (info[DT_PLTREL]->d_un.d_val == DT_REL
+ || info[DT_PLTREL]->d_un.d_val == DT_RELA);
+#endif
+ }
+ if (info[DT_JMPREL] != NULL)
+ info[DT_JMPREL]->d_un.d_ptr += l_addr;
+}
+
+
+
+/* Perform the relocations in MAP on the running program image as specified
+ by RELTAG, SZTAG. If LAZY is nonzero, this is the first pass on PLT
+ relocations; they should be set up to call _dl_runtime_resolve, rather
+ than fully resolved now. */
+
+static inline void
+elf_dynamic_do_rel (struct link_map *map,
+ ElfW(Addr) reladdr, ElfW(Addr) relsize)
+{
+ const ElfW(Rel) *r = (const void *) reladdr;
+ const ElfW(Rel) *end = (const void *) (reladdr + relsize);
+
+ const ElfW(Sym) *const symtab =
+ (const void *) map->l_info[DT_SYMTAB]->d_un.d_ptr;
+
+ for (; r < end; ++r) {
+ elf_machine_rel (map, r, &symtab[ELFW(R_SYM) (r->r_info)],
+ (void *) (map->l_addr + r->r_offset));
+ }
+}
+
+
+void _dl_start(void)
+{
+ static Elf32_Addr last_load_address = 0;
+ struct link_map map;
+ size_t cnt;
+
+
+ /* Partly clean the `map' structure up. Don't use `memset'
+ since it might nor be built in or inlined and we cannot make function
+ calls at this point. */
+ for (cnt = 0; cnt < sizeof(map.l_info) / sizeof(map.l_info[0]); ++cnt) {
+ map.l_info[cnt] = 0;
+ }
+
+ /* Get the last load address */
+ map.ll_addr = last_load_address;
+
+ /* Figure out the run-time load address of the dynamic linker itself. */
+ last_load_address = map.l_addr = elf_machine_load_address();
+
+ /* Read our own dynamic section and fill in the info array. */
+ map.l_ld = (void *)map.l_addr + elf_machine_dynamic();
+
+ elf_get_dynamic_info (map.l_ld, map.l_addr - map.ll_addr, map.l_info);
+
+ /* Relocate ourselves so we can do normal function calls and
+ * data access using the global offset table.
+ */
+#if !ELF_MACHINE_NO_REL
+ elf_dynamic_do_rel(&map,
+ map.l_info[DT_REL]->d_un.d_ptr,
+ map.l_info[DT_RELSZ]->d_un.d_val);
+ if (map.l_info[DT_PLTREL]->d_un.d_val == DT_REL) {
+ elf_dynamic_do_rel(&map,
+ map.l_info[DT_JMPREL]->d_un.d_ptr,
+ map.l_info[DT_PLTRELSZ]->d_un.d_val);
+ }
+#endif
+
+#if !ELF_MACHINE_NO_RELA
+ elf_dynamic_do_rela(&map,
+ map.l_info[DT_RELA]->d_un.d_ptr,
+ map.l_info[DT_RELASZ]->d_un.d_val);
+ if (map.l_info[DT_PLTREL]->d_un.d_val == DT_RELA) {
+ elf_dynamic_do_rela(&map,
+ map.l_info[DT_JMPREL]->d_un.d_ptr,
+ map.l_info[DT_PLTRELSZ]->d_un.d_val);
+ }
+#endif
+
+ /* Now life is sane; we can call functions and access global data.
+ Set up to use the operating system facilities, and find out from
+ the operating system's program loader where to find the program
+ header table in core. Put the rest of _dl_start into a separate
+ function, that way the compiler cannot put accesses to the GOT
+ before ELF_DYNAMIC_RELOCATE. */
+ return;
+}
Index: src/boot/selfboot.c
===================================================================
--- src/boot/selfboot.c (revision 5134)
+++ src/boot/selfboot.c (working copy)
@@ -327,6 +327,7 @@
return ret;
}
+#define RELO 0x2000000
static int build_self_segment_list(
struct segment *head,
@@ -355,7 +356,7 @@
segment->type == PAYLOAD_SEGMENT_CODE ? "code" : "data",
ntohl(segment->compression));
new = malloc(sizeof(*new));
- new->s_dstaddr = ntohl((u32) segment->load_addr);
+ new->s_dstaddr = ntohl((u32) segment->load_addr) + RELO;
new->s_memsz = ntohl(segment->mem_len);
new->compression = ntohl(segment->compression);
@@ -376,13 +377,13 @@
ntohl(segment->mem_len));
new = malloc(sizeof(*new));
new->s_filesz = 0;
- new->s_dstaddr = ntohl((u32) segment->load_addr);
+ new->s_dstaddr = ntohl((u32) segment->load_addr) + RELO;
new->s_memsz = ntohl(segment->mem_len);
break;
case PAYLOAD_SEGMENT_ENTRY:
printk_debug(" Entry Point 0x%p\n", (void *) ntohl((u32) segment->load_addr));
- *entry = ntohl((u32) segment->load_addr);
+ *entry = ntohl((u32) segment->load_addr) + RELO;
/* Per definition, a payload always has the entry point
* as last segment. Thus, we use the occurence of the
* entry point as break condition for the loop.
make_reloc.patch.sig
Description: Binary data
coreboot_change_base.patch.sig
Description: Binary data
-- coreboot mailing list: coreboot@coreboot.org http://www.coreboot.org/mailman/listinfo/coreboot

