Merge pull request #37 from hermit-os/more-reloc
feat(loader): add support for `R_ABS64` and `R_GLOB_DAT` relocation types
mkroening authored Aug 15, 2024
2 parents 5c87198 + d5ffa60 commit e24a560
Showing 1 changed file with 99 additions and 14 deletions.
113 changes: 99 additions & 14 deletions src/elf.rs
@@ -5,28 +5,46 @@ use core::{fmt, str};

use align_address::Align;
use goblin::elf::note::Nhdr32;
use goblin::elf::reloc::r_to_str;
use goblin::elf::section_header::{self, SHN_UNDEF};
use goblin::elf::sym::{self, STB_WEAK};
use goblin::elf64::dynamic::{self, Dyn, DynamicInfo};
use goblin::elf64::header::{self, Header};
use goblin::elf64::program_header::{self, ProgramHeader};
use goblin::elf64::reloc::{self, Rela};
use goblin::elf64::section_header::SectionHeader;
use goblin::elf64::sym::Sym;
use log::{info, warn};
use plain::Plain;

use crate::boot_info::{LoadInfo, TlsInfo};

// See https://refspecs.linuxbase.org/elf/x86_64-abi-0.98.pdf
#[cfg(target_arch = "x86_64")]
const ELF_ARCH: u16 = goblin::elf::header::EM_X86_64;
#[cfg(target_arch = "x86_64")]
const R_ABS64: u32 = goblin::elf::reloc::R_X86_64_64;
#[cfg(target_arch = "x86_64")]
const R_RELATIVE: u32 = goblin::elf::reloc::R_X86_64_RELATIVE;
#[cfg(target_arch = "x86_64")]
const R_GLOB_DAT: u32 = goblin::elf::reloc::R_X86_64_GLOB_DAT;

// See https://github.com/ARM-software/abi-aa/blob/2023Q3/aaelf64/aaelf64.rst#relocation
#[cfg(target_arch = "aarch64")]
const ELF_ARCH: u16 = goblin::elf::header::EM_AARCH64;
#[cfg(target_arch = "aarch64")]
const R_ABS64: u32 = goblin::elf::reloc::R_AARCH64_ABS64;
#[cfg(target_arch = "aarch64")]
const R_RELATIVE: u32 = goblin::elf::reloc::R_AARCH64_RELATIVE;
#[cfg(target_arch = "aarch64")]
const R_GLOB_DAT: u32 = goblin::elf::reloc::R_AARCH64_GLOB_DAT;

/// https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/v1.0/riscv-elf.adoc#relocations
#[cfg(target_arch = "riscv64")]
const ELF_ARCH: u16 = goblin::elf::header::EM_RISCV;
#[cfg(target_arch = "riscv64")]
const R_ABS64: u32 = goblin::elf::reloc::R_RISCV_64;
#[cfg(target_arch = "riscv64")]
const R_RELATIVE: u32 = goblin::elf::reloc::R_RISCV_RELATIVE;
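// Note: no R_GLOB_DAT constant is defined for riscv64; the corresponding relocation arm below is gated to aarch64 and x86_64.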

/// A parsed kernel object ready for loading.
@@ -46,6 +64,9 @@ pub struct KernelObject<'a> {

/// Relocations with an explicit addend.
relas: &'a [Rela],

/// Symbol table for relocations
dynsyms: &'a [Sym],
}

struct NoteIterator<'a> {
@@ -111,6 +132,12 @@ impl<'a> KernelObject<'a> {
ProgramHeader::slice_from_bytes_len(&elf[start..], len).unwrap()
};

let shs = {
let start = header.e_shoff as usize;
let len = header.e_shnum as usize;
SectionHeader::slice_from_bytes_len(&elf[start..], len).unwrap()
};
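// The section headers are used below to locate the dynamic symbol table (SHT_DYNSYM).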

// General compatibility checks
{
let class = header.e_ident[header::EI_CLASS];
@@ -159,8 +186,8 @@ impl<'a> KernelObject<'a> {
.find(|program_header| program_header.p_type == program_header::PT_DYNAMIC)
.map(|ph| {
let start = ph.p_offset as usize;
- let len = (ph.p_filesz as usize) / dynamic::SIZEOF_DYN;
- Dyn::slice_from_bytes_len(&elf[start..], len).unwrap()
+ let len = ph.p_filesz as usize;
+ Dyn::slice_from_bytes(&elf[start..][..len]).unwrap()
})
.unwrap_or_default();

@@ -175,19 +202,26 @@

let relas = {
let start = dynamic_info.rela;
- let len = dynamic_info.relacount;
- Rela::slice_from_bytes_len(&elf[start..], len).unwrap()
+ let len = dynamic_info.relasz;
+ Rela::slice_from_bytes(&elf[start..][..len]).unwrap()
};

- assert!(relas
- .iter()
- .all(|rela| reloc::r_type(rela.r_info) == R_RELATIVE));
+ let dynsyms = shs
+ .iter()
+ .find(|section_header| section_header.sh_type == section_header::SHT_DYNSYM)
+ .map(|sh| {
+ let start = sh.sh_offset as usize;
+ let len = sh.sh_size as usize;
+ Sym::slice_from_bytes(&elf[start..][..len]).unwrap()
+ })
+ .unwrap_or_default();

Ok(KernelObject {
elf,
header,
phs,
relas,
dynsyms,
})
}

@@ -292,7 +326,7 @@ impl<'a> KernelObject<'a> {
let file_len = ph.p_filesz as usize;
let ph_file = &self.elf[ph.p_offset as usize..][..file_len];
// FIXME: Replace with `maybe_uninit_write_slice` once stable
- let ph_file = unsafe { mem::transmute(ph_file) };
+ let ph_file = unsafe { mem::transmute::<&[u8], &[MaybeUninit<u8>]>(ph_file) };
ph_memory[..file_len].copy_from_slice(ph_file);
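// Zero-fill the remainder of the segment (the p_memsz tail beyond p_filesz, e.g. .bss).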
for byte in &mut ph_memory[file_len..] {
byte.write(0);
@@ -302,13 +336,64 @@
if self.is_relocatable() {
// Perform relocations
self.relas.iter().for_each(|rela| {
- assert_eq!(R_RELATIVE, reloc::r_type(rela.r_info));
- let relocated = (start_addr as i64 + rela.r_addend).to_ne_bytes();
- let buf = &relocated[..];
- // FIXME: Replace with `maybe_uninit_write_slice` once stable
- let buf = unsafe { mem::transmute(buf) };
- memory[rela.r_offset as usize..][..mem::size_of_val(&relocated)]
- .copy_from_slice(buf);
match reloc::r_type(rela.r_info) {
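// R_ABS64: write the symbol's run-time address plus the addend (S + A) into the target.
// Undefined weak symbols resolve to 0; their (already zero) targets are left untouched.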
R_ABS64 => {
let sym = reloc::r_sym(rela.r_info) as usize;
let sym = &self.dynsyms[sym];

if sym::st_bind(sym.st_info) == STB_WEAK
&& u32::from(sym.st_shndx) == SHN_UNDEF
{
let memory = &memory[rela.r_offset as usize..][..8];
let memory =
unsafe { mem::transmute::<&[MaybeUninit<u8>], &[u8]>(memory) };
assert_eq!(memory, &[0; 8]);
return;
}

let relocated =
(start_addr as i64 + sym.st_value as i64 + rela.r_addend).to_ne_bytes();
let buf = &relocated[..];
// FIXME: Replace with `maybe_uninit_write_slice` once stable
let buf = unsafe { mem::transmute::<&[u8], &[MaybeUninit<u8>]>(buf) };
memory[rela.r_offset as usize..][..mem::size_of_val(&relocated)]
.copy_from_slice(buf);
}
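// R_RELATIVE: no symbol lookup; rebase the addend by the image's load address (B + A).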
R_RELATIVE => {
let relocated = (start_addr as i64 + rela.r_addend).to_ne_bytes();
let buf = &relocated[..];
// FIXME: Replace with `maybe_uninit_write_slice` once stable
let buf = unsafe { mem::transmute::<&[u8], &[MaybeUninit<u8>]>(buf) };
memory[rela.r_offset as usize..][..mem::size_of_val(&relocated)]
.copy_from_slice(buf);
}
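// R_GLOB_DAT: set a GOT entry to the symbol's run-time address; undefined weak symbols are again left as zero.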
#[cfg(any(target_arch = "aarch64", target_arch = "x86_64"))]
R_GLOB_DAT => {
let sym = reloc::r_sym(rela.r_info) as usize;
let sym = &self.dynsyms[sym];

if sym::st_bind(sym.st_info) == STB_WEAK
&& u32::from(sym.st_shndx) == SHN_UNDEF
{
let memory = &memory[rela.r_offset as usize..][..8];
let memory =
unsafe { mem::transmute::<&[MaybeUninit<u8>], &[u8]>(memory) };
assert_eq!(memory, &[0; 8]);
return;
}

let relocated =
(start_addr as i64 + sym.st_value as i64 + rela.r_addend).to_ne_bytes();
#[cfg(target_arch = "x86_64")]
assert_eq!(rela.r_addend, 0);
let buf = &relocated[..];
// FIXME: Replace with `maybe_uninit_write_slice` once stable
let buf = unsafe { mem::transmute::<&[u8], &[MaybeUninit<u8>]>(buf) };
memory[rela.r_offset as usize..][..mem::size_of_val(&relocated)]
.copy_from_slice(buf);
}
typ => panic!("unknown relocation type {}", r_to_str(typ, ELF_ARCH)),
}
});
}
