Skip to content

Commit

Permalink
Merge pull request #381 from rust-osdev/gdt_atomic
Browse files Browse the repository at this point in the history
Allow GDT to be loaded with shared reference
  • Loading branch information
josephlr authored Apr 16, 2022
2 parents e70b8a3 + 8bb01e8 commit 156cfda
Show file tree
Hide file tree
Showing 3 changed files with 50 additions and 73 deletions.
60 changes: 0 additions & 60 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,9 +11,6 @@
#![deny(missing_debug_implementations)]
#![deny(unsafe_op_in_unsafe_fn)]

use core::cell::UnsafeCell;
use core::sync::atomic::{AtomicBool, Ordering};

pub use crate::addr::{align_down, align_up, PhysAddr, VirtAddr};

pub mod addr;
Expand Down Expand Up @@ -66,60 +63,3 @@ impl PrivilegeLevel {
}
}
}

/// A wrapper that can be used to safely create one mutable reference `&'static mut T` from a static variable.
///
/// `SingleUseCell` is safe because it ensures that it only ever gives out one reference.
///
/// `SingleUseCell<T>` is a safe alternative to `static mut` or a static `UnsafeCell<T>`.
#[derive(Debug)]
pub struct SingleUseCell<T> {
    // Set to `true` by the first `try_get_mut` call; guards the single hand-out.
    used: AtomicBool,
    // The wrapped value; only ever accessed through `try_get_mut`.
    value: UnsafeCell<T>,
}

impl<T> SingleUseCell<T> {
    /// Construct a new SingleUseCell.
    pub const fn new(value: T) -> Self {
        Self {
            used: AtomicBool::new(false),
            value: UnsafeCell::new(value),
        }
    }

    /// Try to acquire a mutable reference to the wrapped value.
    /// This will only succeed the first time the function is
    /// called and fail on all following calls.
    ///
    /// ```
    /// use x86_64::SingleUseCell;
    ///
    /// static FOO: SingleUseCell<i32> = SingleUseCell::new(0);
    ///
    /// // Call `try_get_mut` for the first time and get a reference.
    /// let first: &'static mut i32 = FOO.try_get_mut().unwrap();
    /// assert_eq!(first, &0);
    ///
    /// // Calling `try_get_mut` again will return `None`.
    /// assert_eq!(FOO.try_get_mut(), None);
    /// ```
    pub fn try_get_mut(&self) -> Option<&mut T> {
        // Atomically flip the flag; exactly one caller ever observes `false`.
        let already_used = self.used.swap(true, Ordering::AcqRel);
        if already_used {
            None
        } else {
            Some(unsafe {
                // SAFETY: no reference has been given out yet and we won't give out another.
                &mut *self.value.get()
            })
        }
    }
}

// SAFETY: Sharing a `SingleUseCell<T>` between threads is safe regardless of whether `T` is `Sync`
// because we only expose the inner value once to one thread. The `T: Send` bound makes sure that
// sending a unique reference to another thread is safe.
unsafe impl<T: Send> Sync for SingleUseCell<T> {}

// SAFETY: It's safe to send a `SingleUseCell<T>` to another thread if it's safe to send `T`.
unsafe impl<T: Send> Send for SingleUseCell<T> {}
38 changes: 33 additions & 5 deletions src/structures/gdt.rs
Original file line number Diff line number Diff line change
Expand Up @@ -10,30 +10,55 @@ use core::fmt;
#[cfg(doc)]
use crate::registers::segmentation::{Segment, CS, SS};

#[cfg(feature = "instructions")]
use core::sync::atomic::{AtomicU64 as EntryValue, Ordering};
#[cfg(not(feature = "instructions"))]
use u64 as EntryValue;

/// 8-byte entry in a descriptor table.
///
/// A [`GlobalDescriptorTable`] (or LDT) is an array of these entries, and
/// [`SegmentSelector`]s index into this array. Each [`Descriptor`] in the table
/// uses either 1 Entry (if it is a [`UserSegment`](Descriptor::UserSegment)) or
/// 2 Entries (if it is a [`SystemSegment`](Descriptor::SystemSegment)). This
/// type exists to give users access to the raw entry bits in a GDT.
#[derive(Clone, PartialEq, Eq)]
#[repr(transparent)]
pub struct Entry(u64);
pub struct Entry(EntryValue);

impl Entry {
    // Create a new Entry from a raw value.
    const fn new(raw: u64) -> Self {
        // With the "instructions" feature, entries are stored as `AtomicU64`
        // (see the `EntryValue` alias), so the raw bits must be wrapped;
        // without the feature, `EntryValue` is a plain `u64` and `raw` is
        // already in the right representation.
        // NOTE(review): the atomic storage presumably exists so a loaded GDT
        // can be read through a shared reference — confirm against the
        // `load(&'static self)` change in this commit.
        #[cfg(feature = "instructions")]
        let raw = EntryValue::new(raw);
        Self(raw)
    }

    /// The raw bits for this entry. Depending on the [`Descriptor`] type, these
    /// bits may correspond to those in [`DescriptorFlags`].
    pub fn raw(&self) -> u64 {
        // TODO: Make this const fn when AtomicU64::load is const.
        // Snapshot the bits: an atomic load under the "instructions" feature,
        // a plain copy otherwise.
        #[cfg(feature = "instructions")]
        let raw = self.0.load(Ordering::SeqCst);
        #[cfg(not(feature = "instructions"))]
        let raw = self.0;
        raw
    }
}

impl Clone for Entry {
fn clone(&self) -> Self {
Self::new(self.raw())
}
}

// Manual equality: compare snapshots of the raw bits so comparison works for
// both the atomic and the plain `u64` representation.
impl PartialEq for Entry {
    fn eq(&self, other: &Self) -> bool {
        let (lhs, rhs) = (self.raw(), other.raw());
        lhs == rhs
    }
}

// Bitwise equality on `u64` snapshots is reflexive, so `Eq` holds.
impl Eq for Entry {}

impl fmt::Debug for Entry {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Display inner value as hex
Expand Down Expand Up @@ -99,6 +124,9 @@ impl<const MAX: usize> GlobalDescriptorTable<MAX> {
// TODO: Replace with compiler error when feature(generic_const_exprs) is stable.
assert!(MAX > 0, "A GDT cannot have 0 entries");
assert!(MAX <= (1 << 13), "A GDT can only have at most 2^13 entries");

// TODO: Replace with inline_const when it's stable.
#[allow(clippy::declare_interior_mutable_const)]
const NULL: Entry = Entry::new(0);
Self {
table: [NULL; MAX],
Expand Down Expand Up @@ -195,7 +223,7 @@ impl<const MAX: usize> GlobalDescriptorTable<MAX> {
/// [`SS::set_reg()`] and [`CS::set_reg()`].
#[cfg(feature = "instructions")]
#[inline]
pub fn load(&'static mut self) {
pub fn load(&'static self) {
// SAFETY: static lifetime ensures no modification after loading.
unsafe { self.load_unsafe() };
}
Expand All @@ -213,7 +241,7 @@ impl<const MAX: usize> GlobalDescriptorTable<MAX> {
///
#[cfg(feature = "instructions")]
#[inline]
pub unsafe fn load_unsafe(&mut self) {
pub unsafe fn load_unsafe(&self) {
use crate::instructions::tables::lgdt;
unsafe {
lgdt(&self.pointer());
Expand Down
25 changes: 17 additions & 8 deletions testing/src/gdt.rs
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
use lazy_static::lazy_static;
use x86_64::structures::gdt::{Descriptor, GlobalDescriptorTable, SegmentSelector};
use x86_64::structures::tss::TaskStateSegment;
use x86_64::{SingleUseCell, VirtAddr};
use x86_64::VirtAddr;

pub const DOUBLE_FAULT_IST_INDEX: u16 = 0;

Expand All @@ -18,12 +18,14 @@ lazy_static! {
};
tss
};
static ref GDT: (SingleUseCell<GlobalDescriptorTable>, Selectors) = {
static ref GDT: (GlobalDescriptorTable, Selectors) = {
let mut gdt = GlobalDescriptorTable::new();
// Add an unused segment so we get a different value for CS
gdt.append(Descriptor::kernel_data_segment());
let code_selector = gdt.append(Descriptor::kernel_code_segment());
let tss_selector = gdt.append(Descriptor::tss_segment(&TSS));
(
SingleUseCell::new(gdt),
gdt,
Selectors {
code_selector,
tss_selector,
Expand All @@ -41,9 +43,16 @@ pub fn init() {
use x86_64::instructions::segmentation::{Segment, CS};
use x86_64::instructions::tables::load_tss;

GDT.0.try_get_mut().unwrap().load();
unsafe {
CS::set_reg(GDT.1.code_selector);
load_tss(GDT.1.tss_selector);
}
// Make sure loading CS actually changes the value
GDT.0.load();
assert_ne!(CS::get_reg(), GDT.1.code_selector);
unsafe { CS::set_reg(GDT.1.code_selector) };
assert_eq!(CS::get_reg(), GDT.1.code_selector);

// Loading the TSS should mark the GDT entry as busy
let tss_idx: usize = GDT.1.tss_selector.index().into();
let old_tss_entry = GDT.0.entries()[tss_idx].clone();
unsafe { load_tss(GDT.1.tss_selector) };
let new_tss_entry = GDT.0.entries()[tss_idx].clone();
assert_ne!(old_tss_entry, new_tss_entry);
}

0 comments on commit 156cfda

Please sign in to comment.