diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index e64a6df0..1216bcab 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -26,6 +26,9 @@ jobs:
           - nightly
           - 1.59
     runs-on: ubuntu-latest
+    env:
+      # rustup prioritizes environment variables over rust-toolchain.toml files.
+      RUSTUP_TOOLCHAIN: ${{ matrix.rust }}
     steps:
       - uses: actions/checkout@v4
       - uses: dtolnay/rust-toolchain@master
@@ -53,7 +56,7 @@ jobs:
       - uses: actions/checkout@v4
       - uses: dtolnay/rust-toolchain@nightly
         with:
-          targets: x86_64-unknown-linux-musl, i686-unknown-linux-gnu, thumbv7em-none-eabihf
+          targets: x86_64-unknown-linux-musl, i686-unknown-linux-musl, thumbv7em-none-eabihf
       - run: cargo build

@@ -69,9 +72,12 @@ jobs:

       - name: "Build on non x86_64 platforms"
         run: |
-          cargo build --target i686-unknown-linux-gnu --no-default-features --features nightly
+          cargo build --target i686-unknown-linux-musl --no-default-features --features nightly
           cargo build --target thumbv7em-none-eabihf --no-default-features --features nightly
+      - run: cargo test --target i686-unknown-linux-musl --no-default-features --features nightly
+        if: runner.os == 'Linux'
+
   bootloader-test:
     name: "Bootloader Integration Test"
diff --git a/Cargo.toml b/Cargo.toml
index 5d53caae..9b51f248 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -15,7 +15,7 @@ license = "MIT/Apache-2.0"
 name = "x86_64"
 readme = "README.md"
 repository = "https://github.com/rust-osdev/x86_64"
-version = "0.15.1"
+version = "0.15.2"
 edition = "2018"
 rust-version = "1.59" # Needed to support inline asm and default const generics

@@ -28,8 +28,9 @@ rustversion = "1.0.5"
 [features]
 default = ["nightly", "instructions"]
 instructions = []
-nightly = [ "const_fn", "step_trait", "abi_x86_interrupt", "asm_const" ]
+nightly = ["const_fn", "step_trait", "abi_x86_interrupt", "asm_const"]
 abi_x86_interrupt = []
+# deprecated, no longer needed
 const_fn = []
 asm_const = []
 step_trait = []
diff --git a/Changelog.md b/Changelog.md
index 2f386b41..8b615175 100644
--- a/Changelog.md
+++ b/Changelog.md
@@ -5,6 +5,46 @@
 - [add `Mapper::clear` to clear any page table entry regardless of the present flag](https://github.com/rust-osdev/x86_64/pull/484)
 - [`Mapper::unmap` now also returns the flags of the page ](https://github.com/rust-osdev/x86_64/pull/484)

+# 0.15.2 – 2024-11-30
+
+This release is compatible with Rust nightlies starting with `nightly-2024-11-23` (this only applies when the `nightly` feature is used).
+
+## New Features
+
+- [add `GlobalDescriptorTable::limit`](https://github.com/rust-osdev/x86_64/pull/413)
+- [constify PhysFrame functions](https://github.com/rust-osdev/x86_64/pull/489)
+- [add `size` and `len` for `PageRange`, `PhysFrameRange`, `PageRangeInclusive` and `PhysFrameRangeInclusive`](https://github.com/rust-osdev/x86_64/pull/491)
+- [TryFrom implementation for ExceptionVector](https://github.com/rust-osdev/x86_64/pull/506)
+
+## Fixes
+
+- [Only enable instructions on `x86_64`](https://github.com/rust-osdev/x86_64/pull/483)
+- [Ensure that Page actually implements Hash](https://github.com/rust-osdev/x86_64/pull/490)
+- [fix field order for INVPCID descriptor](https://github.com/rust-osdev/x86_64/pull/508)
+- [fix typo in "InvPicdCommand"](https://github.com/rust-osdev/x86_64/pull/509)
+- [fix signature of Step::steps_between implementations](https://github.com/rust-osdev/x86_64/pull/513)
+
+## Other Improvements
+
+- [docs: add aliases for `in{,b,w,l}` and `out{,b,w,l}`](https://github.com/rust-osdev/x86_64/pull/474)
+- [ci: migrate away from unmaintained actions](https://github.com/rust-osdev/x86_64/pull/478)
+- [chore: migrate from legacy `rust-toolchain` to `rust-toolchain.toml`](https://github.com/rust-osdev/x86_64/pull/479)
+- [test: replace `x86_64-bare-metal.json` with `x86_64-unknown-none`](https://github.com/rust-osdev/x86_64/pull/477)
+- [docs: fix and detect warnings](https://github.com/rust-osdev/x86_64/pull/475)
+- [CI: Set `-Crelocation-model=static` in `RUSTFLAGS` for bootloader test job](https://github.com/rust-osdev/x86_64/pull/480)
+- [silence warning about cast](https://github.com/rust-osdev/x86_64/pull/482)
+- [fix cfg related warnings](https://github.com/rust-osdev/x86_64/pull/485)
+- [fix warnings](https://github.com/rust-osdev/x86_64/pull/488)
+- [don't use label starting with 1](https://github.com/rust-osdev/x86_64/pull/492)
+- [fix testing](https://github.com/rust-osdev/x86_64/pull/495)
+- [remove `#![feature(asm_const)]`](https://github.com/rust-osdev/x86_64/pull/496)
+- [Remove stabilized const_mut_refs feature](https://github.com/rust-osdev/x86_64/pull/501)
+- [Fix clippy warnings](https://github.com/rust-osdev/x86_64/pull/502)
+- [fix CI job for building on MSRV](https://github.com/rust-osdev/x86_64/pull/510)
+- [gate HandlerFunc behind target_arch = "x86{\_64}"](https://github.com/rust-osdev/x86_64/pull/507)
+- [Typo fix in TaskStateSegment comment](https://github.com/rust-osdev/x86_64/pull/504)
+- [Minor clarification DescriptorTablePointer::limit comment](https://github.com/rust-osdev/x86_64/pull/503)
+
 # 0.15.1 – 2024-03-19

 ## New Features
diff --git a/src/addr.rs b/src/addr.rs
index 19031986..7ce471ee 100644
--- a/src/addr.rs
+++ b/src/addr.rs
@@ -240,25 +240,43 @@ impl VirtAddr {
     }

     // FIXME: Move this into the `Step` impl, once `Step` is stabilized.
+    #[cfg(feature = "step_trait")]
+    pub(crate) fn steps_between_impl(start: &Self, end: &Self) -> (usize, Option<usize>) {
+        if let Some(steps) = Self::steps_between_u64(start, end) {
+            let steps = usize::try_from(steps).ok();
+            (steps.unwrap_or(usize::MAX), steps)
+        } else {
+            (0, None)
+        }
+    }
+
+    /// An implementation of steps_between that returns u64. Note that this
+    /// function always returns the exact bound, so it doesn't need to return a
+    /// lower and upper bound like steps_between does.
     #[cfg(any(feature = "instructions", feature = "step_trait"))]
-    pub(crate) fn steps_between_impl(start: &Self, end: &Self) -> Option<usize> {
+    pub(crate) fn steps_between_u64(start: &Self, end: &Self) -> Option<u64> {
         let mut steps = end.0.checked_sub(start.0)?;

         // Mask away extra bits that appear while jumping the gap.
         steps &= 0xffff_ffff_ffff;

-        usize::try_from(steps).ok()
+        Some(steps)
     }

     // FIXME: Move this into the `Step` impl, once `Step` is stabilized.
     #[inline]
     pub(crate) fn forward_checked_impl(start: Self, count: usize) -> Option<Self> {
-        let offset = u64::try_from(count).ok()?;
-        if offset > ADDRESS_SPACE_SIZE {
+        Self::forward_checked_u64(start, u64::try_from(count).ok()?)
+    }
+
+    /// An implementation of forward_checked that takes u64 instead of usize.
+    #[inline]
+    pub(crate) fn forward_checked_u64(start: Self, count: u64) -> Option<Self> {
+        if count > ADDRESS_SPACE_SIZE {
             return None;
         }

-        let mut addr = start.0.checked_add(offset)?;
+        let mut addr = start.0.checked_add(count)?;

         match addr.get_bits(47..) {
             0x1 => {
@@ -274,6 +292,31 @@ impl VirtAddr {

         Some(unsafe { Self::new_unsafe(addr) })
     }
+
+    /// An implementation of backward_checked that takes u64 instead of usize.
+    #[cfg(feature = "step_trait")]
+    #[inline]
+    pub(crate) fn backward_checked_u64(start: Self, count: u64) -> Option<Self> {
+        if count > ADDRESS_SPACE_SIZE {
+            return None;
+        }
+
+        let mut addr = start.0.checked_sub(count)?;
+
+        match addr.get_bits(47..) {
+            0x1fffe => {
+                // Jump the gap by sign extending the 47th bit.
+                addr.set_bits(47.., 0);
+            }
+            0x1fffd => {
+                // Address underflow
+                return None;
+            }
+            _ => {}
+        }
+
+        Some(unsafe { Self::new_unsafe(addr) })
+    }
 }

 impl fmt::Debug for VirtAddr {
@@ -360,7 +403,7 @@ impl Sub<VirtAddr> for VirtAddr {
 #[cfg(feature = "step_trait")]
 impl Step for VirtAddr {
     #[inline]
-    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
         Self::steps_between_impl(start, end)
     }

@@ -371,26 +414,7 @@ impl Step for VirtAddr {

     #[inline]
     fn backward_checked(start: Self, count: usize) -> Option<Self> {
-        let offset = u64::try_from(count).ok()?;
-        if offset > ADDRESS_SPACE_SIZE {
-            return None;
-        }
-
-        let mut addr = start.0.checked_sub(offset)?;
-
-        match addr.get_bits(47..) {
-            0x1fffe => {
-                // Jump the gap by sign extending the 47th bit.
-                addr.set_bits(47.., 0);
-            }
-            0x1fffd => {
-                // Address underflow
-                return None;
-            }
-            _ => {}
-        }
-
-        Some(unsafe { Self::new_unsafe(addr) })
+        Self::backward_checked_u64(start, u64::try_from(count).ok()?)
     }
 }

@@ -495,7 +519,15 @@ impl PhysAddr {
     where
         U: Into<u64>,
     {
-        PhysAddr(align_down(self.0, align.into()))
+        self.align_down_u64(align.into())
+    }
+
+    /// Aligns the physical address downwards to the given alignment.
+    ///
+    /// See the `align_down` function for more information.
+    #[inline]
+    pub(crate) const fn align_down_u64(self, align: u64) -> Self {
+        PhysAddr(align_down(self.0, align))
     }

     /// Checks whether the physical address has the demanded alignment.
@@ -504,7 +536,13 @@ impl PhysAddr {
     where
         U: Into<u64>,
     {
-        self.align_down(align) == self
+        self.is_aligned_u64(align.into())
+    }
+
+    /// Checks whether the physical address has the demanded alignment.
+    #[inline]
+    pub(crate) const fn is_aligned_u64(self, align: u64) -> bool {
+        self.align_down_u64(align).as_u64() == self.as_u64()
     }
 }

@@ -650,22 +688,27 @@ mod tests {
             Step::forward_checked(VirtAddr(0xffff_ffff_ffff_ffff), 1),
             None
         );
+        #[cfg(target_pointer_width = "64")]
         assert_eq!(
             Step::forward(VirtAddr(0x7fff_ffff_ffff), 0x1234_5678_9abd),
             VirtAddr(0xffff_9234_5678_9abc)
         );
+        #[cfg(target_pointer_width = "64")]
         assert_eq!(
             Step::forward(VirtAddr(0x7fff_ffff_ffff), 0x8000_0000_0000),
             VirtAddr(0xffff_ffff_ffff_ffff)
         );
+        #[cfg(target_pointer_width = "64")]
         assert_eq!(
             Step::forward(VirtAddr(0x7fff_ffff_ff00), 0x8000_0000_00ff),
             VirtAddr(0xffff_ffff_ffff_ffff)
         );
+        #[cfg(target_pointer_width = "64")]
         assert_eq!(
             Step::forward_checked(VirtAddr(0x7fff_ffff_ff00), 0x8000_0000_0100),
             None
         );
+        #[cfg(target_pointer_width = "64")]
         assert_eq!(
             Step::forward_checked(VirtAddr(0x7fff_ffff_ffff), 0x8000_0000_0001),
             None
@@ -686,18 +729,22 @@ mod tests {
             Step::backward(VirtAddr(0xffff_8000_0000_0001), 1),
             VirtAddr(0xffff_8000_0000_0000)
         );
+        #[cfg(target_pointer_width = "64")]
         assert_eq!(
             Step::backward(VirtAddr(0xffff_9234_5678_9abc), 0x1234_5678_9abd),
             VirtAddr(0x7fff_ffff_ffff)
         );
+        #[cfg(target_pointer_width = "64")]
         assert_eq!(
             Step::backward(VirtAddr(0xffff_8000_0000_0000), 0x8000_0000_0000),
             VirtAddr(0)
         );
+        #[cfg(target_pointer_width = "64")]
         assert_eq!(
             Step::backward(VirtAddr(0xffff_8000_0000_0000), 0x7fff_ffff_ff01),
             VirtAddr(0xff)
         );
+        #[cfg(target_pointer_width = "64")]
         assert_eq!(
             Step::backward_checked(VirtAddr(0xffff_8000_0000_0000), 0x8000_0000_0001),
             None
@@ -707,43 +754,64 @@ mod tests {
     #[test]
     #[cfg(feature = "step_trait")]
     fn virtaddr_steps_between() {
-        assert_eq!(Step::steps_between(&VirtAddr(0), &VirtAddr(0)), Some(0));
-        assert_eq!(Step::steps_between(&VirtAddr(0), &VirtAddr(1)), Some(1));
-        assert_eq!(Step::steps_between(&VirtAddr(1), &VirtAddr(0)), None);
+        assert_eq!(
+            Step::steps_between(&VirtAddr(0), &VirtAddr(0)),
+            (0, Some(0))
+        );
+        assert_eq!(
+            Step::steps_between(&VirtAddr(0), &VirtAddr(1)),
+            (1, Some(1))
+        );
+        assert_eq!(Step::steps_between(&VirtAddr(1), &VirtAddr(0)), (0, None));
         assert_eq!(
             Step::steps_between(
                 &VirtAddr(0x7fff_ffff_ffff),
                 &VirtAddr(0xffff_8000_0000_0000)
             ),
-            Some(1)
+            (1, Some(1))
         );
         assert_eq!(
             Step::steps_between(
                 &VirtAddr(0xffff_8000_0000_0000),
                 &VirtAddr(0x7fff_ffff_ffff)
             ),
-            None
+            (0, None)
         );
         assert_eq!(
             Step::steps_between(
                 &VirtAddr(0xffff_8000_0000_0000),
                 &VirtAddr(0xffff_8000_0000_0000)
             ),
-            Some(0)
+            (0, Some(0))
        );
         assert_eq!(
             Step::steps_between(
                 &VirtAddr(0xffff_8000_0000_0000),
                 &VirtAddr(0xffff_8000_0000_0001)
             ),
-            Some(1)
+            (1, Some(1))
         );
         assert_eq!(
             Step::steps_between(
                 &VirtAddr(0xffff_8000_0000_0001),
                 &VirtAddr(0xffff_8000_0000_0000)
             ),
-            None
+            (0, None)
+        );
+        // Make sure that we handle `steps > u32::MAX` correctly on 32-bit
+        // targets. On 64-bit targets, `0x1_0000_0000` fits into `usize`, so we
+        // can return exact lower and upper bounds. On 32-bit targets,
+        // `0x1_0000_0000` doesn't fit into `usize`, so we only return a lower
+        // bound of `usize::MAX` and don't return an upper bound.
+        #[cfg(target_pointer_width = "64")]
+        assert_eq!(
+            Step::steps_between(&VirtAddr(0), &VirtAddr(0x1_0000_0000)),
+            (0x1_0000_0000, Some(0x1_0000_0000))
+        );
+        #[cfg(not(target_pointer_width = "64"))]
+        assert_eq!(
+            Step::steps_between(&VirtAddr(0), &VirtAddr(0x1_0000_0000)),
+            (usize::MAX, None)
         );
     }

@@ -795,10 +863,14 @@ mod tests {
     }

     #[test]
+    #[cfg(target_pointer_width = "64")]
     fn test_from_ptr_array() {
         let slice = &[1, 2, 3, 4, 5];
         // Make sure that from_ptr(slice) is the address of the first element
-        assert_eq!(VirtAddr::from_ptr(slice), VirtAddr::from_ptr(&slice[0]));
+        assert_eq!(
+            VirtAddr::from_ptr(slice.as_slice()),
+            VirtAddr::from_ptr(&slice[0])
+        );
     }
 }

@@ -937,7 +1009,7 @@ mod proofs {
         };

         // ...then `steps_between` succeeds as well.
-        assert!(Step::steps_between(&start, &end) == Some(count));
+        assert!(Step::steps_between(&start, &end) == (count, Some(count)));
     }

     // This harness proves that for all inputs for which `steps_between`
@@ -954,7 +1026,7 @@ mod proofs {
         };

         // If `steps_between` succeeds...
-        let Some(count) = Step::steps_between(&start, &end) else {
+        let Some(count) = Step::steps_between(&start, &end).1 else {
             return;
         };
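For illustration, a minimal sketch of the new tuple-returning `Step::steps_between` for `VirtAddr` (assumes a nightly toolchain, the crate's `step_trait` feature, and a 64-bit host; the addresses mirror the tests above):

```rust
#![feature(step_trait)]

use core::iter::Step;
use x86_64::VirtAddr;

fn main() {
    // When the exact distance fits in `usize`, both bounds are returned.
    let start = VirtAddr::new(0x1000);
    let end = VirtAddr::new(0x5000);
    assert_eq!(Step::steps_between(&start, &end), (0x4000, Some(0x4000)));

    // The last low-half address and the first high-half address are exactly
    // one step apart; the non-canonical gap is skipped.
    let low = VirtAddr::new(0x7fff_ffff_ffff);
    let high = VirtAddr::new(0xffff_8000_0000_0000);
    assert_eq!(Step::steps_between(&low, &high), (1, Some(1)));

    // A reversed range has zero reachable steps and no exact count.
    assert_eq!(Step::steps_between(&high, &low), (0, None));
}
```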
diff --git a/src/instructions/mod.rs b/src/instructions/mod.rs
index 4b676c55..8018698c 100644
--- a/src/instructions/mod.rs
+++ b/src/instructions/mod.rs
@@ -32,8 +32,11 @@ pub fn nop() {
     }
 }

-/// Emits a '[magic breakpoint](https://wiki.osdev.org/Bochs#Magic_Breakpoint)' instruction for the [Bochs](http://bochs.sourceforge.net/) CPU
-/// emulator. Make sure to set `magic_break: enabled=1` in your `.bochsrc` file.
+/// Emits a '[magic breakpoint](https://wiki.osdev.org/Bochs#Magic_Breakpoint)'
+/// instruction for the [Bochs](http://bochs.sourceforge.net/) CPU
+/// emulator.
+///
+/// Make sure to set `magic_break: enabled=1` in your `.bochsrc` file.
 #[inline]
 pub fn bochs_breakpoint() {
     unsafe {
diff --git a/src/instructions/segmentation.rs b/src/instructions/segmentation.rs
index f170b083..c8fbfbab 100644
--- a/src/instructions/segmentation.rs
+++ b/src/instructions/segmentation.rs
@@ -75,10 +75,10 @@ impl Segment for CS {
         unsafe {
             asm!(
                 "push {sel}",
-                "lea {tmp}, [1f + rip]",
+                "lea {tmp}, [55f + rip]",
                 "push {tmp}",
                 "retfq",
-                "1:",
+                "55:",
                 sel = in(reg) u64::from(sel.0),
                 tmp = lateout(reg) _,
                 options(preserves_flags),
diff --git a/src/instructions/tlb.rs b/src/instructions/tlb.rs
index ea60fce7..4a523483 100644
--- a/src/instructions/tlb.rs
+++ b/src/instructions/tlb.rs
@@ -30,7 +30,7 @@ pub fn flush_all() {

 /// The Invalidate PCID Command to execute.
 #[derive(Debug)]
-pub enum InvPicdCommand {
+pub enum InvPcidCommand {
     /// The logical processor invalidates mappings—except global translations—for the linear address and PCID specified.
     Address(VirtAddr, Pcid),

@@ -44,13 +44,18 @@ pub enum InvPicdCommand {
     AllExceptGlobal,
 }

+// TODO: Remove this in the next breaking release.
+#[deprecated = "please use `InvPcidCommand` instead"]
+#[doc(hidden)]
+pub type InvPicdCommand = InvPcidCommand;
+
 /// The INVPCID descriptor comprises 128 bits and consists of a PCID and a linear address.
 /// For INVPCID type 0, the processor uses the full 64 bits of the linear address even outside 64-bit mode; the linear address is not used for other INVPCID types.
 #[repr(C)]
 #[derive(Debug)]
 struct InvpcidDescriptor {
-    address: u64,
     pcid: u64,
+    address: u64,
 }

 /// Structure of a PCID. A PCID has to be <= 4096 for x86_64.
@@ -93,25 +98,25 @@ impl fmt::Display for PcidTooBig {
 ///
 /// This function is unsafe as it requires CPUID.(EAX=07H, ECX=0H):EBX.INVPCID to be 1.
 #[inline]
-pub unsafe fn flush_pcid(command: InvPicdCommand) {
+pub unsafe fn flush_pcid(command: InvPcidCommand) {
     let mut desc = InvpcidDescriptor {
-        address: 0,
         pcid: 0,
+        address: 0,
     };

     let kind: u64;
     match command {
-        InvPicdCommand::Address(addr, pcid) => {
+        InvPcidCommand::Address(addr, pcid) => {
             kind = 0;
             desc.pcid = pcid.value().into();
             desc.address = addr.as_u64()
         }
-        InvPicdCommand::Single(pcid) => {
+        InvPcidCommand::Single(pcid) => {
             kind = 1;
             desc.pcid = pcid.0.into()
         }
-        InvPicdCommand::All => kind = 2,
-        InvPicdCommand::AllExceptGlobal => kind = 3,
+        InvPcidCommand::All => kind = 2,
+        InvPcidCommand::AllExceptGlobal => kind = 3,
     }

     unsafe {
@@ -310,14 +315,14 @@ where
         if let Some(mut pages) = self.page_range {
             while !pages.is_empty() {
                 // Calculate out how many pages we still need to flush.
-                let count = Page::<S>::steps_between_impl(&pages.start, &pages.end).unwrap();
+                let count = Page::<S>::steps_between_impl(&pages.start, &pages.end).0;

                 // Make sure that we never jump the gap in the address space when flushing.
                 let second_half_start =
                     Page::<S>::containing_address(VirtAddr::new(0xffff_8000_0000_0000));
                 let count = if pages.start < second_half_start {
                     let count_to_second_half =
-                        Page::steps_between_impl(&pages.start, &second_half_start).unwrap();
+                        Page::steps_between_impl(&pages.start, &second_half_start).0;
                     cmp::min(count, count_to_second_half)
                 } else {
                     count
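A minimal usage sketch for the renamed enum (the wrapper function and its name are illustrative; soundness rests on the CPUID check that `flush_pcid` documents):

```rust
use x86_64::instructions::tlb::{self, InvPcidCommand, Pcid};

/// Drops all non-global TLB entries tagged with `pcid`.
///
/// # Safety
///
/// The caller must have verified CPUID.(EAX=07H, ECX=0H):EBX.INVPCID = 1,
/// as required by `flush_pcid`.
unsafe fn flush_address_space(pcid: Pcid) {
    tlb::flush_pcid(InvPcidCommand::Single(pcid));
}
```

The old `InvPicdCommand` spelling still compiles via the hidden type alias, but now emits a deprecation warning.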
diff --git a/src/lib.rs b/src/lib.rs
index 0f0ee2b1..08b30ee9 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -2,8 +2,6 @@
 //! and access to various system registers.

 #![cfg_attr(not(test), no_std)]
-#![cfg_attr(feature = "const_fn", feature(const_mut_refs))] // GDT::append()
-#![cfg_attr(feature = "asm_const", feature(asm_const))]
 #![cfg_attr(feature = "abi_x86_interrupt", feature(abi_x86_interrupt))]
 #![cfg_attr(feature = "step_trait", feature(step_trait))]
 #![cfg_attr(feature = "doc_auto_cfg", feature(doc_auto_cfg))]
diff --git a/src/registers/model_specific.rs b/src/registers/model_specific.rs
index 07504878..0471bfae 100644
--- a/src/registers/model_specific.rs
+++ b/src/registers/model_specific.rs
@@ -357,11 +357,11 @@ mod x86_64 {
         ///
         /// # Returns
         /// - Field 1 (SYSRET): The CS selector is set to this field + 16. SS.Sel is set to
-        /// this field + 8. Because SYSRET always returns to CPL 3, the
-        /// RPL bits 1:0 should be initialized to 11b.
+        ///   this field + 8. Because SYSRET always returns to CPL 3, the
+        ///   RPL bits 1:0 should be initialized to 11b.
         /// - Field 2 (SYSCALL): This field is copied directly into CS.Sel. SS.Sel is set to
-        /// this field + 8. Because SYSCALL always switches to CPL 0, the RPL bits
-        /// 33:32 should be initialized to 00b.
+        ///   this field + 8. Because SYSCALL always switches to CPL 0, the RPL bits
+        ///   33:32 should be initialized to 00b.
         #[inline]
         pub fn read_raw() -> (u16, u16) {
             let msr_value = unsafe { Self::MSR.read() };
@@ -398,11 +398,11 @@ mod x86_64 {
         ///
         /// # Parameters
         /// - sysret: The CS selector is set to this field + 16. SS.Sel is set to
-        /// this field + 8. Because SYSRET always returns to CPL 3, the
-        /// RPL bits 1:0 should be initialized to 11b.
+        ///   this field + 8. Because SYSRET always returns to CPL 3, the
+        ///   RPL bits 1:0 should be initialized to 11b.
         /// - syscall: This field is copied directly into CS.Sel. SS.Sel is set to
-        /// this field + 8. Because SYSCALL always switches to CPL 0, the RPL bits
-        /// 33:32 should be initialized to 00b.
+        ///   this field + 8. Because SYSCALL always switches to CPL 0, the RPL bits
+        ///   33:32 should be initialized to 00b.
         ///
         /// # Safety
         ///
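A sketch of how the selector arithmetic documented above plays out with `Star::write_raw` (the GDT layout is hypothetical and must match whatever GDT is actually loaded):

```rust
use x86_64::registers::model_specific::Star;
use x86_64::registers::segmentation::SegmentSelector;
use x86_64::PrivilegeLevel;

/// Programs the STAR selector fields for SYSCALL/SYSRET.
///
/// # Safety
///
/// The selector indices must match the GDT that is actually loaded.
unsafe fn init_star() {
    // Hypothetical layout: kernel code at index 1, kernel data at index 2,
    // user data at index 4, user code at index 5. SYSCALL copies the syscall
    // field into CS.Sel (RPL 0) and loads SS from that field + 8; SYSRET
    // loads CS from the sysret field + 16 and SS from that field + 8, so the
    // sysret field points at index 3 with RPL 3.
    let syscall_base = SegmentSelector::new(1, PrivilegeLevel::Ring0);
    let sysret_base = SegmentSelector::new(3, PrivilegeLevel::Ring3);
    Star::write_raw(sysret_base.0, syscall_base.0);
}
```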
diff --git a/src/structures/gdt.rs b/src/structures/gdt.rs
index c04b4e42..e839fa6e 100644
--- a/src/structures/gdt.rs
+++ b/src/structures/gdt.rs
@@ -193,7 +193,7 @@ impl<const MAX: usize> GlobalDescriptorTable<MAX> {
     ///
     /// Panics if the GDT doesn't have enough free entries.
     #[inline]
-    #[cfg_attr(feature = "const_fn", rustversion::attr(all(), const))]
+    #[rustversion::attr(since(1.83), const)]
     pub fn append(&mut self, entry: Descriptor) -> SegmentSelector {
         let index = match entry {
             Descriptor::UserSegment(value) => {
@@ -246,7 +246,7 @@ impl<const MAX: usize> GlobalDescriptorTable<MAX> {
     }

     #[inline]
-    #[cfg_attr(feature = "const_fn", rustversion::attr(all(), const))]
+    #[rustversion::attr(since(1.83), const)]
     fn push(&mut self, value: u64) -> usize {
         let index = self.len;
         self.table[index] = Entry::new(value);
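Since `append` becomes `const` on Rust 1.83 and newer (where `const_mut_refs` is stable), a GDT can be assembled entirely at compile time; a minimal sketch, assuming Rust 1.83+:

```rust
use x86_64::structures::gdt::{Descriptor, GlobalDescriptorTable};

// `append` takes `&mut self`, which is allowed in const contexts since
// Rust 1.83, so this static is fully built by the compiler.
static GDT: GlobalDescriptorTable = {
    let mut gdt = GlobalDescriptorTable::new();
    gdt.append(Descriptor::kernel_code_segment());
    gdt.append(Descriptor::kernel_data_segment());
    gdt
};
```

On older toolchains the same calls still work, just at runtime (e.g. behind `lazy_static`, as in the `testing` crate below).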
diff --git a/src/structures/idt.rs b/src/structures/idt.rs
index 2d9beb5a..d4a8deb8 100644
--- a/src/structures/idt.rs
+++ b/src/structures/idt.rs
@@ -24,6 +24,7 @@ use crate::registers::rflags::RFlags;
 use crate::{PrivilegeLevel, VirtAddr};
 use bit_field::BitField;
 use bitflags::bitflags;
+use core::convert::TryFrom;
 use core::fmt;
 use core::marker::PhantomData;
 use core::ops::Bound::{Excluded, Included, Unbounded};
@@ -153,9 +154,9 @@ pub struct InterruptDescriptorTable {
     ///   is enabled.
     /// - Execution of any legacy SSE instruction when `CR4.OSFXSR` is cleared to 0.
     /// - Execution of any SSE instruction (uses `YMM`/`XMM` registers), or 64-bit media
-    /// instruction (uses `MMX™` registers) when `CR0.EM` = 1.
+    ///   instruction (uses `MMX™` registers) when `CR0.EM` = 1.
     /// - Execution of any SSE floating-point instruction (uses `YMM`/`XMM` registers) that
-    /// causes a numeric exception when `CR4.OSXMMEXCPT` = 0.
+    ///   causes a numeric exception when `CR4.OSXMMEXCPT` = 0.
     /// - Use of the `DR4` or `DR5` debug registers when `CR4.DE` = 1.
     /// - Execution of `RSM` when not in `SMM` mode.
     ///
@@ -503,7 +504,7 @@ impl InterruptDescriptorTable {
     ///
     /// - `self` is never destroyed.
     /// - `self` always stays at the same memory location. It is recommended to wrap it in
-    /// a `Box`.
+    ///   a `Box`.
     ///
     #[cfg(all(feature = "instructions", target_arch = "x86_64"))]
     #[inline]
@@ -712,52 +713,82 @@ impl<F> PartialEq for Entry<F> {
 /// A handler function for an interrupt or an exception without error code.
 ///
 /// This type alias is only usable with the `abi_x86_interrupt` feature enabled.
-#[cfg(feature = "abi_x86_interrupt")]
+#[cfg(all(
+    any(target_arch = "x86", target_arch = "x86_64"),
+    feature = "abi_x86_interrupt"
+))]
 pub type HandlerFunc = extern "x86-interrupt" fn(InterruptStackFrame);

 /// This type is not usable without the `abi_x86_interrupt` feature.
-#[cfg(not(feature = "abi_x86_interrupt"))]
+#[cfg(not(all(
+    any(target_arch = "x86", target_arch = "x86_64"),
+    feature = "abi_x86_interrupt"
+)))]
 #[derive(Copy, Clone, Debug)]
 pub struct HandlerFunc(());

 /// A handler function for an exception that pushes an error code.
 ///
 /// This type alias is only usable with the `abi_x86_interrupt` feature enabled.
-#[cfg(feature = "abi_x86_interrupt")]
+#[cfg(all(
+    any(target_arch = "x86", target_arch = "x86_64"),
+    feature = "abi_x86_interrupt"
+))]
 pub type HandlerFuncWithErrCode = extern "x86-interrupt" fn(InterruptStackFrame, error_code: u64);

 /// This type is not usable without the `abi_x86_interrupt` feature.
-#[cfg(not(feature = "abi_x86_interrupt"))]
+#[cfg(not(all(
+    any(target_arch = "x86", target_arch = "x86_64"),
+    feature = "abi_x86_interrupt"
+)))]
 #[derive(Copy, Clone, Debug)]
 pub struct HandlerFuncWithErrCode(());

 /// A page fault handler function that pushes a page fault error code.
 ///
 /// This type alias is only usable with the `abi_x86_interrupt` feature enabled.
-#[cfg(feature = "abi_x86_interrupt")]
+#[cfg(all(
+    any(target_arch = "x86", target_arch = "x86_64"),
+    feature = "abi_x86_interrupt"
+))]
 pub type PageFaultHandlerFunc =
     extern "x86-interrupt" fn(InterruptStackFrame, error_code: PageFaultErrorCode);

 /// This type is not usable without the `abi_x86_interrupt` feature.
-#[cfg(not(feature = "abi_x86_interrupt"))]
+#[cfg(not(all(
+    any(target_arch = "x86", target_arch = "x86_64"),
+    feature = "abi_x86_interrupt"
+)))]
 #[derive(Copy, Clone, Debug)]
 pub struct PageFaultHandlerFunc(());

 /// A handler function that must not return, e.g. for a machine check exception.
 ///
 /// This type alias is only usable with the `abi_x86_interrupt` feature enabled.
-#[cfg(feature = "abi_x86_interrupt")]
+#[cfg(all(
+    any(target_arch = "x86", target_arch = "x86_64"),
+    feature = "abi_x86_interrupt"
+))]
 pub type DivergingHandlerFunc = extern "x86-interrupt" fn(InterruptStackFrame) -> !;

 /// This type is not usable without the `abi_x86_interrupt` feature.
-#[cfg(not(feature = "abi_x86_interrupt"))]
+#[cfg(not(all(
+    any(target_arch = "x86", target_arch = "x86_64"),
+    feature = "abi_x86_interrupt"
+)))]
 #[derive(Copy, Clone, Debug)]
 pub struct DivergingHandlerFunc(());

 /// A handler function with an error code that must not return, e.g. for a double fault exception.
 ///
 /// This type alias is only usable with the `abi_x86_interrupt` feature enabled.
-#[cfg(feature = "abi_x86_interrupt")]
+#[cfg(all(
+    any(target_arch = "x86", target_arch = "x86_64"),
+    feature = "abi_x86_interrupt"
+))]
 pub type DivergingHandlerFuncWithErrCode =
     extern "x86-interrupt" fn(InterruptStackFrame, error_code: u64) -> !;

 /// This type is not usable without the `abi_x86_interrupt` feature.
-#[cfg(not(feature = "abi_x86_interrupt"))]
+#[cfg(not(all(
+    any(target_arch = "x86", target_arch = "x86_64"),
+    feature = "abi_x86_interrupt"
+)))]
 #[derive(Copy, Clone, Debug)]
 pub struct DivergingHandlerFuncWithErrCode(());

@@ -853,7 +884,10 @@ pub unsafe trait HandlerFuncType {

 macro_rules! impl_handler_func_type {
     ($f:ty) => {
-        #[cfg(feature = "abi_x86_interrupt")]
+        #[cfg(all(
+            any(target_arch = "x86", target_arch = "x86_64"),
+            feature = "abi_x86_interrupt"
+        ))]
         unsafe impl HandlerFuncType for $f {
             #[inline]
             fn to_virt_addr(self) -> VirtAddr {
@@ -1328,6 +1362,52 @@ pub enum ExceptionVector {
     Security = 0x1E,
 }

+/// Exception vector number is invalid
+#[derive(Debug)]
+pub struct InvalidExceptionVectorNumber(u8);
+
+impl fmt::Display for InvalidExceptionVectorNumber {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(f, "{} is not a valid exception vector", self.0)
+    }
+}
+
+impl TryFrom<u8> for ExceptionVector {
+    type Error = InvalidExceptionVectorNumber;
+
+    /// Tries to convert the exception vector number to [`ExceptionVector`]
+    ///
+    /// Fails if the exception vector number is Coprocessor Segment Overrun, reserved or not an exception vector number
+    fn try_from(exception_vector_number: u8) -> Result<Self, Self::Error> {
+        match exception_vector_number {
+            0x00 => Ok(Self::Division),
+            0x01 => Ok(Self::Debug),
+            0x02 => Ok(Self::NonMaskableInterrupt),
+            0x03 => Ok(Self::Breakpoint),
+            0x04 => Ok(Self::Overflow),
+            0x05 => Ok(Self::BoundRange),
+            0x06 => Ok(Self::InvalidOpcode),
+            0x07 => Ok(Self::DeviceNotAvailable),
+            0x08 => Ok(Self::Double),
+            0x0A => Ok(Self::InvalidTss),
+            0x0B => Ok(Self::SegmentNotPresent),
+            0x0C => Ok(Self::Stack),
+            0x0D => Ok(Self::GeneralProtection),
+            0x0E => Ok(Self::Page),
+            0x10 => Ok(Self::X87FloatingPoint),
+            0x11 => Ok(Self::AlignmentCheck),
+            0x12 => Ok(Self::MachineCheck),
+            0x13 => Ok(Self::SimdFloatingPoint),
+            0x14 => Ok(Self::Virtualization),
+            0x15 => Ok(Self::ControlProtection),
+            0x1C => Ok(Self::HypervisorInjection),
+            0x1D => Ok(Self::VmmCommunication),
+            0x1E => Ok(Self::Security),
+            _ => Err(InvalidExceptionVectorNumber(exception_vector_number)),
+        }
+    }
+}
+
 #[cfg(all(
     feature = "instructions",
     feature = "abi_x86_interrupt",
@@ -1642,7 +1722,7 @@ mod test {

     #[test]
     fn entry_derive_test() {
-        fn foo(_: impl Clone + Copy + PartialEq + fmt::Debug) {}
+        fn foo(_: impl Copy + PartialEq + fmt::Debug) {}

         foo(Entry::<HandlerFunc> {
             pointer_low: 0,
@@ -1667,9 +1747,7 @@ mod test {
         });

         unsafe {
-            frame
-                .as_mut()
-                .update(|f| f.instruction_pointer = f.instruction_pointer + 2u64);
+            frame.as_mut().update(|f| f.instruction_pointer += 2u64);
         }
     }
 }
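A minimal sketch of the new `TryFrom<u8>` conversion (vector numbers per the enum above):

```rust
use core::convert::TryFrom;
use x86_64::structures::idt::ExceptionVector;

fn vector_name(vector: u8) -> &'static str {
    match ExceptionVector::try_from(vector) {
        Ok(ExceptionVector::Page) => "page fault",
        Ok(ExceptionVector::GeneralProtection) => "general protection fault",
        Ok(_) => "another CPU exception",
        // 0x09 (Coprocessor Segment Overrun) and reserved numbers are rejected.
        Err(_) => "not a valid exception vector",
    }
}

fn main() {
    assert_eq!(vector_name(0x0E), "page fault");
    assert_eq!(vector_name(0x09), "not a valid exception vector");
}
```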
diff --git a/src/structures/mod.rs b/src/structures/mod.rs
index 3bfcf78e..1bf7d19f 100644
--- a/src/structures/mod.rs
+++ b/src/structures/mod.rs
@@ -15,7 +15,7 @@ pub mod tss;
 #[derive(Debug, Clone, Copy)]
 #[repr(C, packed(2))]
 pub struct DescriptorTablePointer {
-    /// Size of the DT.
+    /// Size of the DT in bytes - 1.
     pub limit: u16,
     /// Pointer to the memory region containing the DT.
     pub base: VirtAddr,
diff --git a/src/structures/paging/frame.rs b/src/structures/paging/frame.rs
index 6cae8fab..c4a8c97a 100644
--- a/src/structures/paging/frame.rs
+++ b/src/structures/paging/frame.rs
@@ -21,8 +21,9 @@ impl<S: PageSize> PhysFrame<S> {
     ///
     /// Returns an error if the address is not correctly aligned (i.e. is not a valid frame start).
     #[inline]
+    #[rustversion::attr(since(1.61), const)]
     pub fn from_start_address(address: PhysAddr) -> Result<Self, AddressNotAligned> {
-        if !address.is_aligned(S::SIZE) {
+        if !address.is_aligned_u64(S::SIZE) {
             return Err(AddressNotAligned);
         }

@@ -46,9 +47,10 @@ impl<S: PageSize> PhysFrame<S> {

     /// Returns the frame that contains the given physical address.
     #[inline]
+    #[rustversion::attr(since(1.61), const)]
     pub fn containing_address(address: PhysAddr) -> Self {
         PhysFrame {
-            start_address: address.align_down(S::SIZE),
+            start_address: address.align_down_u64(S::SIZE),
             size: PhantomData,
         }
     }
@@ -146,6 +148,22 @@ impl<S: PageSize> PhysFrameRange<S> {
     pub fn is_empty(&self) -> bool {
         self.start >= self.end
     }
+
+    /// Returns the number of frames in the range.
+    #[inline]
+    pub fn len(&self) -> u64 {
+        if !self.is_empty() {
+            self.end - self.start
+        } else {
+            0
+        }
+    }
+
+    /// Returns the size in bytes of all frames within the range.
+    #[inline]
+    pub fn size(&self) -> u64 {
+        S::SIZE * self.len()
+    }
 }

 impl<S: PageSize> Iterator for PhysFrameRange<S> {
@@ -188,6 +206,22 @@ impl<S: PageSize> PhysFrameRangeInclusive<S> {
     pub fn is_empty(&self) -> bool {
         self.start > self.end
     }
+
+    /// Returns the number of frames in the range.
+    #[inline]
+    pub fn len(&self) -> u64 {
+        if !self.is_empty() {
+            self.end - self.start + 1
+        } else {
+            0
+        }
+    }
+
+    /// Returns the size in bytes of all frames within the range.
+    #[inline]
+    pub fn size(&self) -> u64 {
+        S::SIZE * self.len()
+    }
 }

 impl<S: PageSize> Iterator for PhysFrameRangeInclusive<S> {
@@ -213,3 +247,20 @@ impl<S: PageSize> fmt::Debug for PhysFrameRangeInclusive<S> {
             .finish()
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    #[test]
+    pub fn test_frame_range_len() {
+        let start_addr = PhysAddr::new(0xdead_beaf);
+        let start = PhysFrame::<Size4KiB>::containing_address(start_addr);
+        let end = start + 50;
+
+        let range = PhysFrameRange { start, end };
+        assert_eq!(range.len(), 50);
+
+        let range_inclusive = PhysFrameRangeInclusive { start, end };
+        assert_eq!(range_inclusive.len(), 51);
+    }
+}
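A sketch combining the now-const constructors with the new range helpers (the addresses are arbitrary; `containing_address` is `const` only on Rust 1.61 and newer):

```rust
use x86_64::structures::paging::{PhysFrame, Size4KiB};
use x86_64::PhysAddr;

// With `containing_address` const on Rust 1.61+, frames can be computed at
// compile time.
const KERNEL_BASE: PhysFrame<Size4KiB> =
    PhysFrame::containing_address(PhysAddr::new(0x20_0000));

fn main() {
    let start = PhysFrame::<Size4KiB>::containing_address(PhysAddr::new(0x1000));
    let end = PhysFrame::containing_address(PhysAddr::new(0x5000));

    // `PhysFrame::range` is exclusive: frames 0x1000, 0x2000, 0x3000, 0x4000.
    let range = PhysFrame::range(start, end);
    assert_eq!(range.len(), 4);
    assert_eq!(range.size(), 4 * 4096);

    let _ = KERNEL_BASE;
}
```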
diff --git a/src/structures/paging/mapper/mapped_page_table.rs b/src/structures/paging/mapper/mapped_page_table.rs
index 06bda1b1..77e818fe 100644
--- a/src/structures/paging/mapper/mapped_page_table.rs
+++ b/src/structures/paging/mapper/mapped_page_table.rs
@@ -150,7 +150,7 @@ impl<'a, P: PageTableFrameMapping> MappedPageTable<'a, P> {
     }
 }

-impl<'a, P: PageTableFrameMapping> Mapper<Size1GiB> for MappedPageTable<'a, P> {
+impl<P: PageTableFrameMapping> Mapper<Size1GiB> for MappedPageTable<'_, P> {
     #[inline]
     unsafe fn map_to_with_table_flags<A>(
         &mut self,
@@ -290,7 +290,7 @@ impl<'a, P: PageTableFrameMapping> Mapper<Size1GiB> for MappedPageTable<'a, P>
     }
 }

-impl<'a, P: PageTableFrameMapping> Mapper<Size2MiB> for MappedPageTable<'a, P> {
+impl<P: PageTableFrameMapping> Mapper<Size2MiB> for MappedPageTable<'_, P> {
     #[inline]
     unsafe fn map_to_with_table_flags<A>(
         &mut self,
@@ -451,7 +451,7 @@ impl<'a, P: PageTableFrameMapping> Mapper<Size2MiB> for MappedPageTable<'a, P>
     }
 }

-impl<'a, P: PageTableFrameMapping> Mapper<Size4KiB> for MappedPageTable<'a, P> {
+impl<P: PageTableFrameMapping> Mapper<Size4KiB> for MappedPageTable<'_, P> {
     #[inline]
     unsafe fn map_to_with_table_flags<A>(
         &mut self,
@@ -629,7 +629,7 @@ impl<'a, P: PageTableFrameMapping> Mapper<Size4KiB> for MappedPageTable<'a, P>
     }
 }

-impl<'a, P: PageTableFrameMapping> Translate for MappedPageTable<'a, P> {
+impl<P: PageTableFrameMapping> Translate for MappedPageTable<'_, P> {
     #[allow(clippy::inconsistent_digit_grouping)]
     fn translate(&self, addr: VirtAddr) -> TranslateResult {
         let p4 = &self.level_4_table;
@@ -693,7 +693,7 @@ impl<'a, P: PageTableFrameMapping> Translate for MappedPageTable<'a, P> {
     }
 }

-impl<'a, P: PageTableFrameMapping> CleanUp for MappedPageTable<'a, P> {
+impl<P: PageTableFrameMapping> CleanUp for MappedPageTable<'_, P> {
     #[inline]
     unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
     where
diff --git a/src/structures/paging/mapper/offset_page_table.rs b/src/structures/paging/mapper/offset_page_table.rs
index 04d6b153..2ec13ca6 100644
--- a/src/structures/paging/mapper/offset_page_table.rs
+++ b/src/structures/paging/mapper/offset_page_table.rs
@@ -65,7 +65,7 @@ unsafe impl PageTableFrameMapping for PhysOffset {

 // delegate all trait implementations to inner

-impl<'a> Mapper<Size1GiB> for OffsetPageTable<'a> {
+impl Mapper<Size1GiB> for OffsetPageTable<'_> {
     #[inline]
     unsafe fn map_to_with_table_flags<A>(
         &mut self,
@@ -139,7 +139,7 @@ impl<'a> Mapper<Size1GiB> for OffsetPageTable<'a> {
     }
 }

-impl<'a> Mapper<Size2MiB> for OffsetPageTable<'a> {
+impl Mapper<Size2MiB> for OffsetPageTable<'_> {
     #[inline]
     unsafe fn map_to_with_table_flags<A>(
         &mut self,
@@ -213,7 +213,7 @@ impl<'a> Mapper<Size2MiB> for OffsetPageTable<'a> {
     }
 }

-impl<'a> Mapper<Size4KiB> for OffsetPageTable<'a> {
+impl Mapper<Size4KiB> for OffsetPageTable<'_> {
     #[inline]
     unsafe fn map_to_with_table_flags<A>(
         &mut self,
@@ -287,14 +287,14 @@ impl<'a> Mapper<Size4KiB> for OffsetPageTable<'a> {
     }
 }

-impl<'a> Translate for OffsetPageTable<'a> {
+impl Translate for OffsetPageTable<'_> {
     #[inline]
     fn translate(&self, addr: VirtAddr) -> TranslateResult {
         self.inner.translate(addr)
     }
 }

-impl<'a> CleanUp for OffsetPageTable<'a> {
+impl CleanUp for OffsetPageTable<'_> {
     #[inline]
     unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
     where
diff --git a/src/structures/paging/mapper/recursive_page_table.rs b/src/structures/paging/mapper/recursive_page_table.rs
index e8430b3f..779ab909 100644
--- a/src/structures/paging/mapper/recursive_page_table.rs
+++ b/src/structures/paging/mapper/recursive_page_table.rs
@@ -299,7 +299,7 @@ impl<'a> RecursivePageTable<'a> {
     }
 }

-impl<'a> Mapper<Size1GiB> for RecursivePageTable<'a> {
+impl Mapper<Size1GiB> for RecursivePageTable<'_> {
     #[inline]
     unsafe fn map_to_with_table_flags<A>(
         &mut self,
@@ -454,7 +454,7 @@ impl<'a> Mapper<Size1GiB> for RecursivePageTable<'a> {
     }
 }

-impl<'a> Mapper<Size2MiB> for RecursivePageTable<'a> {
+impl Mapper<Size2MiB> for RecursivePageTable<'_> {
     #[inline]
     unsafe fn map_to_with_table_flags<A>(
         &mut self,
@@ -650,7 +650,7 @@ impl<'a> Mapper<Size2MiB> for RecursivePageTable<'a> {
     }
 }

-impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
+impl Mapper<Size4KiB> for RecursivePageTable<'_> {
     #[inline]
     unsafe fn map_to_with_table_flags<A>(
         &mut self,
@@ -884,7 +884,7 @@ impl<'a> Mapper<Size4KiB> for RecursivePageTable<'a> {
     }
 }

-impl<'a> Translate for RecursivePageTable<'a> {
+impl Translate for RecursivePageTable<'_> {
     #[allow(clippy::inconsistent_digit_grouping)]
     fn translate(&self, addr: VirtAddr) -> TranslateResult {
         let page = Page::containing_address(addr);
@@ -957,7 +957,7 @@ impl<'a> Translate for RecursivePageTable<'a> {
     }
 }

-impl<'a> CleanUp for RecursivePageTable<'a> {
+impl CleanUp for RecursivePageTable<'_> {
     #[inline]
     unsafe fn clean_up<D>(&mut self, frame_deallocator: &mut D)
     where
diff --git a/src/structures/paging/page.rs b/src/structures/paging/page.rs
index e05b576d..a51b4df4 100644
--- a/src/structures/paging/page.rs
+++ b/src/structures/paging/page.rs
@@ -23,17 +23,17 @@ pub trait PageSize: Copy + Eq + PartialOrd + Ord + Sealed {
 pub trait NotGiantPageSize: PageSize {}

 /// A standard 4KiB page.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub enum Size4KiB {}

 /// A “huge” 2MiB page.
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub enum Size2MiB {}

 /// A “giant” 1GiB page.
 ///
 /// (Only available on newer x86_64 CPUs.)
-#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
 pub enum Size1GiB {}

 impl PageSize for Size4KiB {
@@ -160,16 +160,27 @@ impl<S: PageSize> Page<S> {

     // FIXME: Move this into the `Step` impl, once `Step` is stabilized.
     #[cfg(any(feature = "instructions", feature = "step_trait"))]
-    pub(crate) fn steps_between_impl(start: &Self, end: &Self) -> Option<usize> {
-        VirtAddr::steps_between_impl(&start.start_address, &end.start_address)
-            .map(|steps| steps / S::SIZE as usize)
+    pub(crate) fn steps_between_impl(start: &Self, end: &Self) -> (usize, Option<usize>) {
+        use core::convert::TryFrom;
+
+        if let Some(steps) =
+            VirtAddr::steps_between_u64(&start.start_address(), &end.start_address())
+        {
+            let steps = steps / S::SIZE;
+            let steps = usize::try_from(steps).ok();
+            (steps.unwrap_or(usize::MAX), steps)
+        } else {
+            (0, None)
+        }
     }

     // FIXME: Move this into the `Step` impl, once `Step` is stabilized.
     #[cfg(any(feature = "instructions", feature = "step_trait"))]
     pub(crate) fn forward_checked_impl(start: Self, count: usize) -> Option<Self> {
-        let count = count.checked_mul(S::SIZE as usize)?;
-        let start_address = VirtAddr::forward_checked_impl(start.start_address, count)?;
+        use core::convert::TryFrom;
+
+        let count = u64::try_from(count).ok()?.checked_mul(S::SIZE)?;
+        let start_address = VirtAddr::forward_checked_u64(start.start_address, count)?;
         Some(Self {
             start_address,
             size: PhantomData,
@@ -293,7 +304,7 @@ impl<S: PageSize> Sub<Self> for Page<S> {

 #[cfg(feature = "step_trait")]
 impl<S: PageSize> Step for Page<S> {
-    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
+    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
         Self::steps_between_impl(start, end)
     }

@@ -302,8 +313,10 @@ impl<S: PageSize> Step for Page<S> {
     }

     fn backward_checked(start: Self, count: usize) -> Option<Self> {
-        let count = count.checked_mul(S::SIZE as usize)?;
-        let start_address = Step::backward_checked(start.start_address, count)?;
+        use core::convert::TryFrom;
+
+        let count = u64::try_from(count).ok()?.checked_mul(S::SIZE)?;
+        let start_address = VirtAddr::backward_checked_u64(start.start_address, count)?;
         Some(Self {
             start_address,
             size: PhantomData,
@@ -327,6 +340,22 @@ impl<S: PageSize> PageRange<S> {
     pub fn is_empty(&self) -> bool {
         self.start >= self.end
     }
+
+    /// Returns the number of pages in the range.
+    #[inline]
+    pub fn len(&self) -> u64 {
+        if !self.is_empty() {
+            self.end - self.start
+        } else {
+            0
+        }
+    }
+
+    /// Returns the size in bytes of all pages within the range.
+    #[inline]
+    pub fn size(&self) -> u64 {
+        S::SIZE * self.len()
+    }
 }

 impl<S: PageSize> Iterator for PageRange<S> {
@@ -380,6 +409,22 @@ impl<S: PageSize> PageRangeInclusive<S> {
     pub fn is_empty(&self) -> bool {
         self.start > self.end
     }
+
+    /// Returns the number of pages in the range.
+    #[inline]
+    pub fn len(&self) -> u64 {
+        if !self.is_empty() {
+            self.end - self.start + 1
+        } else {
+            0
+        }
+    }
+
+    /// Returns the size in bytes of all pages within the range.
+    #[inline]
+    pub fn size(&self) -> u64 {
+        S::SIZE * self.len()
+    }
 }

 impl<S: PageSize> Iterator for PageRangeInclusive<S> {
@@ -429,6 +474,15 @@ impl fmt::Display for AddressNotAligned {
 mod tests {
     use super::*;

+    fn test_is_hash<T: core::hash::Hash>() {}
+
+    #[test]
+    pub fn test_page_is_hash() {
+        test_is_hash::<Page<Size4KiB>>();
+        test_is_hash::<Page<Size2MiB>>();
+        test_is_hash::<Page<Size1GiB>>();
+    }
+
     #[test]
     pub fn test_page_ranges() {
         let page_size = Size4KiB::SIZE;
@@ -475,4 +529,123 @@ mod tests {
         }
         assert_eq!(range_inclusive.next(), None);
     }
+
+    #[test]
+    pub fn test_page_range_len() {
+        let start_addr = VirtAddr::new(0xdead_beaf);
+        let start = Page::<Size4KiB>::containing_address(start_addr);
+        let end = start + 50;
+
+        let range = PageRange { start, end };
+        assert_eq!(range.len(), 50);
+
+        let range_inclusive = PageRangeInclusive { start, end };
+        assert_eq!(range_inclusive.len(), 51);
+    }
+
+    #[test]
+    #[cfg(feature = "step_trait")]
+    fn page_step_forward() {
+        let test_cases = [
+            (0, 0, Some(0)),
+            (0, 1, Some(0x1000)),
+            (0x1000, 1, Some(0x2000)),
+            (0x7fff_ffff_f000, 1, Some(0xffff_8000_0000_0000)),
+            (0xffff_8000_0000_0000, 1, Some(0xffff_8000_0000_1000)),
+            (0xffff_ffff_ffff_f000, 1, None),
+            #[cfg(target_pointer_width = "64")]
+            (0x7fff_ffff_f000, 0x1_2345_6789, Some(0xffff_9234_5678_8000)),
+            #[cfg(target_pointer_width = "64")]
+            (0x7fff_ffff_f000, 0x8_0000_0000, Some(0xffff_ffff_ffff_f000)),
+            #[cfg(target_pointer_width = "64")]
+            (0x7fff_fff0_0000, 0x8_0000_00ff, Some(0xffff_ffff_ffff_f000)),
+            #[cfg(target_pointer_width = "64")]
+            (0x7fff_fff0_0000, 0x8_0000_0100, None),
+            #[cfg(target_pointer_width = "64")]
+            (0x7fff_ffff_f000, 0x8_0000_0001, None),
+            // Make sure that we handle `steps * PAGE_SIZE > u32::MAX`
+            // correctly on 32-bit targets.
+            (0, 0x10_0000, Some(0x1_0000_0000)),
+        ];
+        for (start, count, result) in test_cases {
+            let start = Page::<Size4KiB>::from_start_address(VirtAddr::new(start)).unwrap();
+            let result = result
+                .map(|result| Page::<Size4KiB>::from_start_address(VirtAddr::new(result)).unwrap());
+            assert_eq!(Step::forward_checked(start, count), result);
+        }
+    }
+
+    #[test]
+    #[cfg(feature = "step_trait")]
+    fn page_step_backwards() {
+        let test_cases = [
+            (0, 0, Some(0)),
+            (0, 1, None),
+            (0x1000, 1, Some(0)),
+            (0xffff_8000_0000_0000, 1, Some(0x7fff_ffff_f000)),
+            (0xffff_8000_0000_1000, 1, Some(0xffff_8000_0000_0000)),
+            #[cfg(target_pointer_width = "64")]
+            (0xffff_9234_5678_8000, 0x1_2345_6789, Some(0x7fff_ffff_f000)),
+            #[cfg(target_pointer_width = "64")]
+            (0xffff_8000_0000_0000, 0x8_0000_0000, Some(0)),
+            #[cfg(target_pointer_width = "64")]
+            (0xffff_8000_0000_0000, 0x7_ffff_ff01, Some(0xff000)),
+            #[cfg(target_pointer_width = "64")]
+            (0xffff_8000_0000_0000, 0x8_0000_0001, None),
+            // Make sure that we handle `steps * PAGE_SIZE > u32::MAX`
+            // correctly on 32-bit targets.
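The page-range helpers mirror the frame-range ones; a minimal sketch using 2 MiB pages (the addresses are arbitrary):

```rust
use x86_64::structures::paging::{Page, Size2MiB};
use x86_64::VirtAddr;

fn main() {
    let start = Page::<Size2MiB>::containing_address(VirtAddr::new(0x4000_0000));
    let end = start + 7; // `Page + u64` advances by whole pages.

    // Inclusive range: both endpoints count, so 8 pages of 2 MiB each.
    let range = Page::range_inclusive(start, end);
    assert_eq!(range.len(), 8);
    assert_eq!(range.size(), 8 * 2 * 1024 * 1024);
    assert_eq!(range.count(), 8); // the iterator agrees with `len`
}
```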
+            (0x1_0000_0000, 0x10_0000, Some(0)),
+        ];
+        for (start, count, result) in test_cases {
+            let start = Page::<Size4KiB>::from_start_address(VirtAddr::new(start)).unwrap();
+            let result = result
+                .map(|result| Page::<Size4KiB>::from_start_address(VirtAddr::new(result)).unwrap());
+            assert_eq!(Step::backward_checked(start, count), result);
+        }
+    }
+
+    #[test]
+    #[cfg(feature = "step_trait")]
+    fn page_steps_between() {
+        let test_cases = [
+            (0, 0, 0, Some(0)),
+            (0, 0x1000, 1, Some(1)),
+            (0x1000, 0, 0, None),
+            (0x1000, 0x1000, 0, Some(0)),
+            (0x7fff_ffff_f000, 0xffff_8000_0000_0000, 1, Some(1)),
+            (0xffff_8000_0000_0000, 0x7fff_ffff_f000, 0, None),
+            (0xffff_8000_0000_0000, 0xffff_8000_0000_0000, 0, Some(0)),
+            (0xffff_8000_0000_0000, 0xffff_8000_0000_1000, 1, Some(1)),
+            (0xffff_8000_0000_1000, 0xffff_8000_0000_0000, 0, None),
+            (0xffff_8000_0000_1000, 0xffff_8000_0000_1000, 0, Some(0)),
+            // Make sure that we handle `steps * PAGE_SIZE > u32::MAX` correctly on 32-bit
+            // targets.
+            (
+                0x0000_0000_0000,
+                0x0001_0000_0000,
+                0x10_0000,
+                Some(0x10_0000),
+            ),
+            // The returned bounds are different when `steps` doesn't fit
+            // into `usize`. On 64-bit targets, `0x1_0000_0000` fits into
+            // `usize`, so we can return exact lower and upper bounds. On
+            // 32-bit targets, `0x1_0000_0000` doesn't fit into `usize`, so we
+            // only return a lower bound of `usize::MAX` and don't return an
+            // upper bound.
+            #[cfg(target_pointer_width = "64")]
+            (
+                0x0000_0000_0000,
+                0x1000_0000_0000,
+                0x1_0000_0000,
+                Some(0x1_0000_0000),
+            ),
+            #[cfg(not(target_pointer_width = "64"))]
+            (0x0000_0000_0000, 0x1000_0000_0000, usize::MAX, None),
+        ];
+        for (start, end, lower, upper) in test_cases {
+            let start = Page::<Size4KiB>::from_start_address(VirtAddr::new(start)).unwrap();
+            let end = Page::from_start_address(VirtAddr::new(end)).unwrap();
+            assert_eq!(Step::steps_between(&start, &end), (lower, upper));
+        }
+    }
 }
diff --git a/src/structures/paging/page_table.rs b/src/structures/paging/page_table.rs
index fe9ebde0..e9069bcc 100644
--- a/src/structures/paging/page_table.rs
+++ b/src/structures/paging/page_table.rs
@@ -353,8 +353,8 @@ impl From<PageTableIndex> for usize {
 #[cfg(feature = "step_trait")]
 impl Step for PageTableIndex {
     #[inline]
-    fn steps_between(start: &Self, end: &Self) -> Option<usize> {
-        end.0.checked_sub(start.0).map(usize::from)
+    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
+        Step::steps_between(&start.0, &end.0)
     }

     #[inline]
diff --git a/src/structures/tss.rs b/src/structures/tss.rs
index dcedc383..cbfeff47 100644
--- a/src/structures/tss.rs
+++ b/src/structures/tss.rs
@@ -5,16 +5,17 @@ use core::mem::size_of;

 /// In 64-bit mode the TSS holds information that is not
 /// directly related to the task-switch mechanism,
-/// but is used for finding kernel level stack
-/// if interrupts arrive while in kernel mode.
+/// but is used for stack switching when an interrupt or exception occurs.
 #[derive(Debug, Clone, Copy)]
 #[repr(C, packed(4))]
 pub struct TaskStateSegment {
     reserved_1: u32,
     /// The full 64-bit canonical forms of the stack pointers (RSP) for privilege levels 0-2.
+    /// The stack pointers used when a privilege level change occurs from a lower privilege level to a higher one.
     pub privilege_stack_table: [VirtAddr; 3],
     reserved_2: u64,
     /// The full 64-bit canonical forms of the interrupt stack table (IST) pointers.
+    /// The stack pointers used when an entry in the Interrupt Descriptor Table has an IST value other than 0.
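To illustrate the clarified IST semantics, a common pattern: reserve a dedicated stack and publish it through `interrupt_stack_table` (the buffer size and index are illustrative):

```rust
use core::ptr;
use x86_64::structures::tss::TaskStateSegment;
use x86_64::VirtAddr;

const DOUBLE_FAULT_IST_INDEX: u16 = 0;
const STACK_SIZE: usize = 4096 * 5;

fn make_tss() -> TaskStateSegment {
    // Dedicated stack for double faults; a real kernel would add a guard page.
    static mut STACK: [u8; STACK_SIZE] = [0; STACK_SIZE];

    let mut tss = TaskStateSegment::new();
    tss.interrupt_stack_table[DOUBLE_FAULT_IST_INDEX as usize] = {
        let stack_start = VirtAddr::from_ptr(ptr::addr_of!(STACK));
        stack_start + STACK_SIZE as u64 // stacks grow downwards
    };
    tss
}
```

The chosen index is the value an IDT entry later selects via its (unsafe) `set_stack_index` option.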
     pub interrupt_stack_table: [VirtAddr; 7],
     reserved_3: u64,
     reserved_4: u16,
diff --git a/testing/src/gdt.rs b/testing/src/gdt.rs
index 2fa192dd..28620921 100644
--- a/testing/src/gdt.rs
+++ b/testing/src/gdt.rs
@@ -13,7 +13,7 @@ lazy_static! {
             const STACK_SIZE: usize = 4096;
             static mut STACK: [u8; STACK_SIZE] = [0; STACK_SIZE];

-            let stack_start = VirtAddr::from_ptr(unsafe { ptr::addr_of!(STACK) });
+            let stack_start = VirtAddr::from_ptr(ptr::addr_of!(STACK));
             let stack_end = stack_start + STACK_SIZE as u64;
             stack_end
         };