Merge pull request #1367 from stlankes/irq
revise interrupt handling to allow interrupt sharing between devices
mkroening authored Aug 29, 2024
2 parents 15c087f + 943d514 commit 82f0d28
Showing 8 changed files with 234 additions and 327 deletions.
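Across both architecture backends, the core of the change is the same: a fixed-size table holding at most one handler per interrupt vector is replaced by a map from IRQ number to a queue of handlers, so several devices can register on one line. The following standalone sketch (an illustration using std types, not the kernel's exact code) condenses the registration and dispatch pattern that the diffs below implement:

```rust
use std::collections::{BTreeMap, VecDeque};
use std::sync::Mutex;

// One queue of plain `fn()` handlers per IRQ number instead of a single slot.
static HANDLERS: Mutex<BTreeMap<u8, VecDeque<fn()>>> = Mutex::new(BTreeMap::new());

fn irq_install_handler(irq: u8, handler: fn()) {
	// A second driver registering on the same line appends instead of overwriting.
	HANDLERS.lock().unwrap().entry(irq).or_default().push_back(handler);
}

fn dispatch(irq: u8) {
	if let Some(queue) = HANDLERS.lock().unwrap().get(&irq) {
		for handler in queue {
			// Every registered handler runs; each must check whether its
			// own device actually raised the interrupt.
			handler();
		}
	}
}
```

A `BTreeMap` stands in here because `BTreeMap::new` is `const`; the kernel instead makes `hashbrown::HashMap` constructible in a `const` context by seeding `ahash::RandomState` with fixed values, and guards it with an `InterruptSpinMutex` rather than a plain mutex.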
120 changes: 52 additions & 68 deletions src/arch/aarch64/kernel/interrupts.rs
@@ -1,4 +1,4 @@
-use alloc::collections::BTreeMap;
+use alloc::collections::{BTreeMap, VecDeque};
 use alloc::vec::Vec;
 use core::arch::asm;
 use core::ptr;
@@ -19,8 +19,6 @@ use crate::arch::aarch64::mm::{virtualmem, PhysAddr};
 use crate::core_scheduler;
 use crate::scheduler::{self, CoreId};
 
-/// maximum number of interrupt handlers
-const MAX_HANDLERS: usize = 256;
 /// The ID of the first Private Peripheral Interrupt.
 const PPI_START: u8 = 16;
 /// The ID of the first Shared Peripheral Interrupt.
@@ -29,32 +27,16 @@ const SPI_START: u8 = 32;
 /// Software-generated interrupt for rescheduling
 pub(crate) const SGI_RESCHED: u8 = 1;
 
+type InterruptHandlerQueue = VecDeque<fn()>;
+
 /// Number of the timer interrupt
 static mut TIMER_INTERRUPT: u32 = 0;
-/// A handler function for an interrupt.
-///
-/// Returns true if we should reschedule.
-type HandlerFunc = fn(state: &State) -> bool;
 /// Possible interrupt handlers
-static mut INTERRUPT_HANDLERS: [Option<HandlerFunc>; MAX_HANDLERS] = [None; MAX_HANDLERS];
+static INTERRUPT_HANDLERS: InterruptSpinMutex<HashMap<u8, InterruptHandlerQueue, RandomState>> =
+	InterruptSpinMutex::new(HashMap::with_hasher(RandomState::with_seeds(0, 0, 0, 0)));
 /// Driver for the Arm Generic Interrupt Controller version 3 (or 4).
 pub(crate) static mut GIC: OnceCell<GicV3> = OnceCell::new();
 
-fn timer_handler(_state: &State) -> bool {
-	debug!("Handle timer interrupt");
-
-	// disable timer
-	unsafe {
-		asm!(
-			"msr cntp_cval_el0, xzr",
-			"msr cntp_ctl_el0, xzr",
-			options(nostack, nomem),
-		);
-	}
-
-	true
-}
-
 /// Enable all interrupts
 #[inline]
 pub fn enable() {
@@ -95,80 +77,66 @@
 }
 
 #[allow(dead_code)]
-pub(crate) fn irq_install_handler(irq_number: u8, handler: HandlerFunc) {
+pub(crate) fn irq_install_handler(irq_number: u8, handler: fn()) {
 	debug!("Install handler for interrupt {}", irq_number);
-	unsafe {
-		INTERRUPT_HANDLERS[irq_number as usize + SPI_START as usize] = Some(handler);
-	}
+	let irq_number = irq_number + SPI_START;
+	let mut guard = INTERRUPT_HANDLERS.lock();
+	if let Some(queue) = guard.get_mut(&irq_number) {
+		queue.push_back(handler);
+	} else {
+		let mut queue = VecDeque::new();
+		queue.push_back(handler);
+		guard.insert(irq_number, queue);
+	}
 }
 
 #[no_mangle]
-pub(crate) extern "C" fn do_fiq(state: &State) -> *mut usize {
+pub(crate) extern "C" fn do_fiq(_state: &State) -> *mut usize {
 	if let Some(irqid) = GicV3::get_and_acknowledge_interrupt() {
-		let mut reschedule: bool = false;
-		let vector: usize = u32::from(irqid).try_into().unwrap();
+		let vector: u8 = u32::from(irqid).try_into().unwrap();
 
 		debug!("Receive fiq {}", vector);
-		increment_irq_counter(vector.try_into().unwrap());
+		increment_irq_counter(vector);
 
-		if vector < MAX_HANDLERS {
-			unsafe {
-				if let Some(handler) = INTERRUPT_HANDLERS[vector] {
-					reschedule = handler(state);
-				}
-			}
-		}
+		if let Some(queue) = INTERRUPT_HANDLERS.lock().get(&vector) {
+			for handler in queue.iter() {
+				handler();
+			}
+		}
 
 		crate::executor::run();
+		core_scheduler().handle_waiting_tasks();
 
 		GicV3::end_interrupt(irqid);
-
-		if unsafe {
-			reschedule
-				|| vector == TIMER_INTERRUPT.try_into().unwrap()
-				|| vector == SGI_RESCHED.into()
-		} {
-			// a timer interrupt may have caused unblocking of tasks
-			return core_scheduler()
-				.scheduler()
-				.unwrap_or(core::ptr::null_mut());
-		}
+		return core_scheduler()
+			.scheduler()
+			.unwrap_or(core::ptr::null_mut());
 	}
 
 	core::ptr::null_mut()
 }
 
 #[no_mangle]
-pub(crate) extern "C" fn do_irq(state: &State) -> *mut usize {
+pub(crate) extern "C" fn do_irq(_state: &State) -> *mut usize {
 	if let Some(irqid) = GicV3::get_and_acknowledge_interrupt() {
-		let mut reschedule: bool = false;
-		let vector: usize = u32::from(irqid).try_into().unwrap();
+		let vector: u8 = u32::from(irqid).try_into().unwrap();
 
 		debug!("Receive interrupt {}", vector);
-		increment_irq_counter(vector.try_into().unwrap());
+		increment_irq_counter(vector);
 
-		if vector < MAX_HANDLERS {
-			unsafe {
-				if let Some(handler) = INTERRUPT_HANDLERS[vector] {
-					reschedule = handler(state);
-				}
-			}
-		}
+		if let Some(queue) = INTERRUPT_HANDLERS.lock().get(&vector) {
+			for handler in queue.iter() {
+				handler();
+			}
+		}
 
 		crate::executor::run();
+		core_scheduler().handle_waiting_tasks();
 
 		GicV3::end_interrupt(irqid);
-
-		if unsafe {
-			reschedule
-				|| vector == TIMER_INTERRUPT.try_into().unwrap()
-				|| vector == SGI_RESCHED.into()
-		} {
-			// a timer interrupt may have caused unblocking of tasks
-			return core_scheduler()
-				.scheduler()
-				.unwrap_or(core::ptr::null_mut());
-		}
+		return core_scheduler()
+			.scheduler()
+			.unwrap_or(core::ptr::null_mut());
 	}
 
 	core::ptr::null_mut()
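Two consequences of the new `fn()` handler type are visible here: handlers no longer see the trap `State` or return a reschedule flag (both `do_fiq` and `do_irq` now return the scheduler pointer unconditionally), and every handler queued on a vector runs on every interrupt. Sharing a line therefore relies on each handler checking whether its own device was the source, roughly as in this sketch (all device names and status checks invented):

```rust
// Hypothetical example of two drivers sharing SPI 42 under the new scheme.

fn nic_status_pending() -> bool {
	false // stub: would read the NIC's interrupt-status register
}

fn disk_status_pending() -> bool {
	false // stub: would read the disk controller's interrupt-status register
}

fn nic_irq_handler() {
	// Runs on every interrupt on this line; only act if the NIC raised it.
	if nic_status_pending() {
		// acknowledge and service the NIC
	}
}

fn disk_irq_handler() {
	if disk_status_pending() {
		// acknowledge and service the disk controller
	}
}

fn register_both() {
	irq_install_handler(42, nic_irq_handler);
	irq_install_handler(42, disk_irq_handler); // appended to the queue, not overwriting
}
```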
@@ -311,9 +279,25 @@ pub(crate) fn init() {
 		"Timer interrupt: {}, type {}, flags {}",
 		irq, irqtype, irqflags
 	);
-	unsafe {
-		INTERRUPT_HANDLERS[irq as usize + PPI_START as usize] = Some(timer_handler);
-	}
+
+	fn timer_handler() {
+		debug!("Handle timer interrupt");
+
+		// disable timer
+		unsafe {
+			asm!(
+				"msr cntp_cval_el0, xzr",
+				"msr cntp_ctl_el0, xzr",
+				options(nostack, nomem),
+			);
+		}
+	}
+
+	let mut queue = VecDeque::<fn()>::new();
+	queue.push_back(timer_handler);
+	INTERRUPT_HANDLERS
+		.lock()
+		.insert(u8::try_from(irq).unwrap() + PPI_START, queue);
 	IRQ_NAMES
 		.lock()
 		.insert(u8::try_from(irq).unwrap() + PPI_START, "Timer");
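The timer handler is now a nested `fn()` pushed onto the timer PPI's queue directly rather than registered through `irq_install_handler`, which applies the `SPI_START` offset and therefore only serves shared peripheral interrupts. The reschedule that the old handler requested by returning `true` now happens unconditionally on the interrupt return path.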
83 changes: 45 additions & 38 deletions src/arch/riscv64/kernel/interrupts.rs
@@ -1,6 +1,9 @@
+use alloc::collections::VecDeque;
 use alloc::vec::Vec;
 
-use hermit_sync::SpinMutex;
+use ahash::RandomState;
+use hashbrown::HashMap;
+use hermit_sync::{InterruptSpinMutex, SpinMutex};
 use riscv::asm::wfi;
 use riscv::register::{scause, sie, sip, sstatus, stval};
 use trapframe::TrapFrame;
@@ -16,9 +19,9 @@ static PLIC_CONTEXT: SpinMutex<u16> = SpinMutex::new(0x0);
 /// PLIC context for new interrupt handlers
 static CURRENT_INTERRUPTS: SpinMutex<Vec<u32>> = SpinMutex::new(Vec::new());
 
-const MAX_IRQ: usize = 69;
-
-static mut IRQ_HANDLERS: [usize; MAX_IRQ] = [0; MAX_IRQ];
+type InterruptHandlerQueue = VecDeque<fn()>;
+static INTERRUPT_HANDLERS: InterruptSpinMutex<HashMap<u8, InterruptHandlerQueue, RandomState>> =
+	InterruptSpinMutex::new(HashMap::with_hasher(RandomState::with_seeds(0, 0, 0, 0)));
 
 /// Init Interrupts
 pub fn install() {
@@ -116,7 +119,16 @@ pub fn irq_install_handler(irq_number: u8, handler: fn()) {
 		"Install handler for interrupt {}, context {}",
 		irq_number, *context
 	);
-	IRQ_HANDLERS[irq_number as usize - 1] = handler as usize;
+
+	let mut guard = INTERRUPT_HANDLERS.lock();
+	if let Some(queue) = guard.get_mut(&irq_number) {
+		queue.push_back(handler);
+	} else {
+		let mut queue = VecDeque::new();
+		queue.push_back(handler);
+		guard.insert(irq_number, queue);
+	}
+
 	// Set priority to 7 (highest on FU740)
 	let prio_address = *base_ptr + irq_number as usize * 4;
 	core::ptr::write_volatile(prio_address as *mut u32, 1);
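The get-or-insert sequence above (the aarch64 file uses the same pattern) could be compressed with the entry API that `hashbrown::HashMap` shares with the standard library's map types; a possible simplification, not something the patch itself does:

```rust
// Equivalent registration via the entry API (sketch, not part of the patch):
INTERRUPT_HANDLERS
	.lock()
	.entry(irq_number)
	.or_insert_with(VecDeque::new)
	.push_back(handler);
```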
@@ -172,45 +184,40 @@ pub extern "C" fn trap_handler(tf: &mut TrapFrame) {
 
 /// Handles external interrupts
 fn external_handler() {
-	unsafe {
-		let handler: Option<fn()> = {
-			// Claim interrupt
-			let base_ptr = PLIC_BASE.lock();
-			let context = PLIC_CONTEXT.lock();
-			//let claim_address = *base_ptr + 0x20_2004;
-			let claim_address = *base_ptr + 0x20_0004 + 0x1000 * (*context as usize);
-			let irq = core::ptr::read_volatile(claim_address as *mut u32);
-			if irq != 0 {
-				debug!("External INT: {}", irq);
-				let mut cur_int = CURRENT_INTERRUPTS.lock();
-				cur_int.push(irq);
-				if cur_int.len() > 1 {
-					warn!("More than one external interrupt is pending!");
-				}
-
-				// Call handler
-				if IRQ_HANDLERS[irq as usize - 1] != 0 {
-					let ptr = IRQ_HANDLERS[irq as usize - 1] as *const ();
-					let handler: fn() = core::mem::transmute(ptr);
-					Some(handler)
-				} else {
-					error!("Interrupt handler not installed");
-					None
-				}
-			} else {
-				None
-			}
-		};
-		if let Some(handler) = handler {
-			handler();
-			external_eoi();
-		}
-	}
+	use crate::arch::kernel::core_local::core_scheduler;
+	use crate::scheduler::PerCoreSchedulerExt;
+
+	// Claim interrupt
+	let base_ptr = PLIC_BASE.lock();
+	let context = PLIC_CONTEXT.lock();
+	//let claim_address = *base_ptr + 0x20_2004;
+	let claim_address = *base_ptr + 0x20_0004 + 0x1000 * (*context as usize);
+	let irq = unsafe { core::ptr::read_volatile(claim_address as *mut u32) };
+
+	if irq != 0 {
+		debug!("External INT: {}", irq);
+		let mut cur_int = CURRENT_INTERRUPTS.lock();
+		cur_int.push(irq);
+		if cur_int.len() > 1 {
+			warn!("More than one external interrupt is pending!");
+		}
+
+		// Call handler
+		if let Some(queue) = INTERRUPT_HANDLERS.lock().get(&u8::try_from(irq).unwrap()) {
+			for handler in queue.iter() {
+				handler();
+			}
+		}
+		crate::executor::run();
+
+		core_scheduler().reschedule();
+	}
 }
 
 /// End of external interrupt
-#[cfg(feature = "tcp")]
-pub fn external_eoi() {
+fn external_eoi() {
 	unsafe {
 		let base_ptr = PLIC_BASE.lock();
 		let context = PLIC_CONTEXT.lock();
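For background on the claim sequence in `external_handler`: a PLIC hart context claims the highest-priority pending interrupt by reading its per-context claim/complete register (here at `base + 0x20_0004 + 0x1000 * context`), and handling is completed by writing the claimed ID back to the same register, which is the role of `external_eoi`. A minimal sketch of that cycle, with invented helper names and the offsets used above:

```rust
// Sketch of the PLIC claim/complete cycle (helper names invented;
// register offsets match the code above).
const CLAIM_COMPLETE: usize = 0x20_0004; // per-context claim/complete register
const CONTEXT_STRIDE: usize = 0x1000;

/// Returns the highest-priority pending IRQ for this context, or 0 if none.
unsafe fn plic_claim(base: usize, context: usize) -> u32 {
	let addr = base + CLAIM_COMPLETE + CONTEXT_STRIDE * context;
	core::ptr::read_volatile(addr as *const u32)
}

/// Signals that handling of `irq` is finished, re-enabling it at the PLIC.
unsafe fn plic_complete(base: usize, context: usize, irq: u32) {
	let addr = base + CLAIM_COMPLETE + CONTEXT_STRIDE * context;
	core::ptr::write_volatile(addr as *mut u32, irq);
}
```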
(The diffs for the remaining 6 changed files did not load and are not shown here.)