diff --git a/build.zig b/build.zig
index c53e3bd..0cbcada 100644
--- a/build.zig
+++ b/build.zig
@@ -1,7 +1,5 @@
 const std = @import("std");
 
-const kernel_base = 0xFFFF_FFFF_8000_0000;
-
 pub fn build(b: *std.Build) void {
     const target = b.resolveTargetQuery(.{
         .cpu_arch = .x86_64,
@@ -10,10 +8,6 @@ pub fn build(b: *std.Build) void {
     });
     const optimize = b.standardOptimizeOption(.{});
 
-    // Options
-    const options = b.addOptions();
-    options.addOption(@TypeOf(kernel_base), "kernel_base", kernel_base);
-
     // Modules
     const surtr_module = b.createModule(.{
         .root_source_file = b.path("surtr/defs.zig"),
@@ -23,7 +17,6 @@ pub fn build(b: *std.Build) void {
     });
     ymir_module.addImport("ymir", ymir_module);
     ymir_module.addImport("surtr", surtr_module);
-    ymir_module.addOptions("option", options);
 
     const ymir = b.addExecutable(.{
         .name = "ymir.elf",
@@ -31,13 +24,13 @@ pub fn build(b: *std.Build) void {
         .target = target, // Freestanding x64 ELF executable
         .optimize = optimize, // You can choose the optimization level.
         .linkage = .static,
+        .code_model = .kernel,
     });
     ymir.root_module.red_zone = false; // Disable stack red zone.
     ymir.link_z_relro = false;
     ymir.entry = .{ .symbol_name = "kernelEntry" };
-    ymir.image_base = kernel_base;
+    ymir.linker_script = b.path("ymir/linker.ld");
     ymir.root_module.code_model = .kernel;
-    ymir.root_module.addOptions("option", options);
     ymir.root_module.addImport("surtr", surtr_module);
     ymir.root_module.addImport("ymir", ymir_module);
 
@@ -98,7 +91,6 @@ pub fn build(b: *std.Build) void {
         .optimize = optimize,
         .link_libc = true,
     });
-    ymir_tests.root_module.addOptions("option", options);
     ymir_tests.root_module.addImport("ymir", &ymir_tests.root_module);
     ymir_tests.root_module.addImport("surtr", surtr_module);
     const run_ymir_tests = b.addRunArtifact(ymir_tests);
diff --git a/surtr/boot.zig b/surtr/boot.zig
index 917dd90..044a424 100644
--- a/surtr/boot.zig
+++ b/surtr/boot.zig
@@ -105,6 +105,7 @@ pub fn main() uefi.Status {
     );
 
     // Calculate necessary memory size for kernel image.
+    var kernel_start_virt: elf.Elf64_Addr = std.math.maxInt(elf.Elf64_Addr);
    var kernel_start: elf.Elf64_Addr align(4096) = std.math.maxInt(elf.Elf64_Addr);
     var kernel_end: elf.Elf64_Addr = 0;
     var iter = elf_header.program_header_iterator(kernel);
@@ -114,20 +115,20 @@ pub fn main() uefi.Status {
             return .LoadError;
         } orelse break;
         if (phdr.p_type != elf.PT_LOAD) continue;
-        if (phdr.p_vaddr < kernel_start) kernel_start = phdr.p_vaddr;
-        if (phdr.p_vaddr + phdr.p_memsz > kernel_end) kernel_end = phdr.p_vaddr + phdr.p_memsz;
+        if (phdr.p_paddr < kernel_start) kernel_start = phdr.p_paddr;
+        if (phdr.p_vaddr < kernel_start_virt) kernel_start_virt = phdr.p_vaddr;
+        if (phdr.p_paddr + phdr.p_memsz > kernel_end) kernel_end = phdr.p_paddr + phdr.p_memsz;
     }
     const pages_4kib = (kernel_end - kernel_start + 4095) / 4096;
     log.info("Kernel image: 0x{X:0>16} - 0x{X:0>16} (0x{X} pages)", .{ kernel_start, kernel_end, pages_4kib });
 
     // Allocate memory for kernel image.
-    var kernel_phys: u64 = undefined;
-    status = boot_service.allocatePages(.AllocateAnyPages, .LoaderData, pages_4kib, @ptrCast(&kernel_phys));
+    status = boot_service.allocatePages(.AllocateAddress, .LoaderData, pages_4kib, @ptrCast(&kernel_start));
     if (status != .Success) {
         log.err("Failed to allocate memory for kernel image: {?}", .{status});
         return status;
     }
-    log.info("Allocated memory for kernel image @ 0x{X:0>16} ~ 0x{X:0>16}", .{ kernel_phys, kernel_phys + pages_4kib * 4096 });
+    log.info("Allocated memory for kernel image @ 0x{X:0>16} ~ 0x{X:0>16}", .{ kernel_start, kernel_start + pages_4kib * 4096 });
 
     // Map memory for kernel image
     arch.page.setLv4PageTableWritable(boot_service) catch |err| {
@@ -138,8 +139,8 @@ pub fn main() uefi.Status {
 
     for (0..pages_4kib) |i| {
         arch.page.mapTo(
+            kernel_start_virt + 4096 * i,
             kernel_start + 4096 * i,
-            kernel_phys + 4096 * i,
             .ReadWrite,
             boot_service,
         ) catch |err| {
diff --git a/ymir/linker.ld b/ymir/linker.ld
new file mode 100644
index 0000000..f4e8815
--- /dev/null
+++ b/ymir/linker.ld
@@ -0,0 +1,23 @@
+KERNEL_VADDR_BASE = 0xFFFFFFFF80000000;
+KERNEL_VADDR_TEXT = 0xFFFFFFFF80100000;
+
+SECTIONS {
+    . = KERNEL_VADDR_TEXT;
+
+    .text ALIGN(4K) : AT (ADDR(.text) - KERNEL_VADDR_BASE) {
+        *(.text)
+    }
+
+    .rodata ALIGN(4K) : AT (ADDR(.rodata) - KERNEL_VADDR_BASE) {
+        *(.rodata)
+    }
+
+    .data ALIGN(4K) : AT (ADDR(.data) - KERNEL_VADDR_BASE) {
+        *(.data)
+    }
+
+    .bss ALIGN(4K) : AT (ADDR(.bss) - KERNEL_VADDR_BASE) {
+        *(COMMON)
+        *(.bss)
+    }
+}
diff --git a/ymir/main.zig b/ymir/main.zig
index 6dfc182..139dd69 100644
--- a/ymir/main.zig
+++ b/ymir/main.zig
@@ -16,8 +16,8 @@ const BootstrapPageAllocator = ymir.mem.BootstrapPageAllocator;
 pub const panic = @import("panic.zig").panic_fn;
 pub const std_options = klog.default_log_options;
 
-/// Size in 4KiB pages of the kernel stack.
-const kstack_size = arch.page_size * 0x50;
+/// Size in bytes of the kernel stack.
+const kstack_size = arch.page_size * 5;
 /// Kernel stack.
 /// The first page is used as a guard page.
 /// TODO: make the guard page read-only.
diff --git a/ymir/ymir.zig b/ymir/ymir.zig
index ca7a656..f779e5a 100644
--- a/ymir/ymir.zig
+++ b/ymir/ymir.zig
@@ -5,11 +5,11 @@ pub const mem = @import("mem.zig");
 pub const spin = @import("spin.zig");
 
 /// Base virtual address of direct mapping.
-/// The virtual address starting from the address is directly mapped to the physical address.
+/// The virtual address starting from the address is directly mapped to the physical address at 0x0.
 pub const direct_map_base = 0xFFFF_8880_0000_0000;
 /// The base virtual address of the kernel.
-/// The physical address to which this virtual address is mapped is undefined.
-pub const kernel_base = @import("option").kernel_base;
+/// The virtual address starting from the address is directly mapped to the physical address at 0x0.
+pub const kernel_base = 0xFFFF_FFFF_8000_0000;
 
 test {
     @import("std").testing.refAllDeclsRecursive(@This());
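
Note: with this change, a kernel virtual address and the physical address backing it differ only by the constant kernel_base offset. The linker script sets each section's load address to its virtual address minus KERNEL_VADDR_BASE, Surtr allocates the image at p_paddr via .AllocateAddress, and mapTo() maps p_vaddr onto p_paddr page by page. A minimal Zig sketch of that translation follows; the helper names virt2phys/phys2virt are illustrative only and not part of this diff.

const kernel_base: u64 = 0xFFFF_FFFF_8000_0000; // same value as ymir.kernel_base above

// Kernel virtual address -> physical address (constant-offset mapping).
fn virt2phys(virt: u64) u64 {
    return virt - kernel_base;
}

// Physical address -> kernel virtual address.
fn phys2virt(phys: u64) u64 {
    return phys + kernel_base;
}

test "kernel_base offset translation" {
    // e.g. .text linked at KERNEL_VADDR_TEXT (0xFFFF_FFFF_8010_0000) is loaded at physical 0x10_0000.
    try @import("std").testing.expectEqual(@as(u64, 0x10_0000), virt2phys(0xFFFF_FFFF_8010_0000));
    try @import("std").testing.expectEqual(@as(u64, 0xFFFF_FFFF_8010_0000), phys2virt(0x10_0000));
}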