From 2adc17a5442614dbe34626fdd9b32de7c07b8086 Mon Sep 17 00:00:00 2001 From: Luo Jia Date: Wed, 5 Jan 2022 14:17:58 +0800 Subject: [PATCH] More RISC-V instructions in `core::arch` (#1271) --- crates/core_arch/src/core_arch_docs.md | 6 +- crates/core_arch/src/mod.rs | 30 +- crates/core_arch/src/riscv/mod.rs | 10 - crates/core_arch/src/riscv64/mod.rs | 49 +++ crates/core_arch/src/riscv_shared/mod.rs | 481 +++++++++++++++++++++++ 5 files changed, 558 insertions(+), 18 deletions(-) delete mode 100644 crates/core_arch/src/riscv/mod.rs create mode 100644 crates/core_arch/src/riscv64/mod.rs create mode 100644 crates/core_arch/src/riscv_shared/mod.rs diff --git a/crates/core_arch/src/core_arch_docs.md b/crates/core_arch/src/core_arch_docs.md index be0f98c843..58b7eda9a8 100644 --- a/crates/core_arch/src/core_arch_docs.md +++ b/crates/core_arch/src/core_arch_docs.md @@ -185,7 +185,8 @@ others at: * [`x86_64`] * [`arm`] * [`aarch64`] -* [`riscv`] +* [`riscv32`] +* [`riscv64`] * [`mips`] * [`mips64`] * [`powerpc`] @@ -197,7 +198,8 @@ others at: [`x86_64`]: x86_64/index.html [`arm`]: arm/index.html [`aarch64`]: aarch64/index.html -[`riscv`]: riscv/index.html +[`riscv32`]: riscv32/index.html +[`riscv64`]: riscv64/index.html [`mips`]: mips/index.html [`mips64`]: mips64/index.html [`powerpc`]: powerpc/index.html diff --git a/crates/core_arch/src/mod.rs b/crates/core_arch/src/mod.rs index ec28bd62c4..20751eeec5 100644 --- a/crates/core_arch/src/mod.rs +++ b/crates/core_arch/src/mod.rs @@ -56,14 +56,28 @@ pub mod arch { pub use crate::core_arch::aarch64::*; } - /// Platform-specific intrinsics for the `riscv` platform. + /// Platform-specific intrinsics for the `riscv32` platform. /// /// See the [module documentation](../index.html) for more details. - #[cfg(any(target_arch = "riscv32", target_arch = "riscv64", doc))] - #[doc(cfg(any(target_arch = "riscv32", target_arch = "riscv64")))] + #[cfg(any(target_arch = "riscv32", doc))] + #[doc(cfg(any(target_arch = "riscv32")))] #[unstable(feature = "stdsimd", issue = "27731")] - pub mod riscv { - pub use crate::core_arch::riscv::*; + pub mod riscv32 { + pub use crate::core_arch::riscv_shared::*; + } + + /// Platform-specific intrinsics for the `riscv64` platform. + /// + /// See the [module documentation](../index.html) for more details. + #[cfg(any(target_arch = "riscv64", doc))] + #[doc(cfg(any(target_arch = "riscv64")))] + #[unstable(feature = "stdsimd", issue = "27731")] + pub mod riscv64 { + pub use crate::core_arch::riscv64::*; + // RISC-V RV64 supports all RV32 instructions as well in current specifications (2022-01-05). + // Module `riscv_shared` includes instructions available under all RISC-V platforms, + // i.e. RISC-V RV32 instructions. + pub use crate::core_arch::riscv_shared::*; } /// Platform-specific intrinsics for the `wasm32` platform. @@ -264,7 +278,11 @@ mod arm; #[cfg(any(target_arch = "riscv32", target_arch = "riscv64", doc))] #[doc(cfg(any(target_arch = "riscv32", target_arch = "riscv64")))] -mod riscv; +mod riscv_shared; + +#[cfg(any(target_arch = "riscv64", doc))] +#[doc(cfg(any(target_arch = "riscv64")))] +mod riscv64; #[cfg(any(target_family = "wasm", doc))] #[doc(cfg(target_family = "wasm"))] diff --git a/crates/core_arch/src/riscv/mod.rs b/crates/core_arch/src/riscv/mod.rs deleted file mode 100644 index b93c35fc4f..0000000000 --- a/crates/core_arch/src/riscv/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -//! 
RISC-V intrinsics - -/// Generates the `PAUSE` instruction -/// -/// The PAUSE instruction is a HINT that indicates the current hart's rate of instruction retirement -/// should be temporarily reduced or paused. The duration of its effect must be bounded and may be zero. -#[inline] -pub fn pause() { - unsafe { crate::arch::asm!(".word 0x0100000F", options(nomem, nostack)) } -} diff --git a/crates/core_arch/src/riscv64/mod.rs b/crates/core_arch/src/riscv64/mod.rs new file mode 100644 index 0000000000..24aae78325 --- /dev/null +++ b/crates/core_arch/src/riscv64/mod.rs @@ -0,0 +1,49 @@ +//! RISC-V RV64 specific intrinsics +use crate::arch::asm; + +/// Loads virtual machine memory by unsigned word integer +/// +/// This instruction performs an explicit memory access as though `V=1`; +/// i.e., with the address translation and protection, and the endianness, that apply to memory +/// accesses in either VS-mode or VU-mode. +/// +/// This operation is not available under RV32 base instruction set. +/// +/// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.WU` +/// instruction which is effectively an unreference to any memory address. +#[inline] +pub unsafe fn hlv_wu(src: *const u32) -> u32 { + let value: u32; + asm!(".insn i 0x73, 0x4, {}, {}, 0x681", out(reg) value, in(reg) src, options(readonly, nostack)); + value +} + +/// Loads virtual machine memory by unsigned double integer +/// +/// This instruction performs an explicit memory access as though `V=1`; +/// i.e., with the address translation and protection, and the endianness, that apply to memory +/// accesses in either VS-mode or VU-mode. +/// +/// This operation is not available under RV32 base instruction set. +/// +/// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.D` +/// instruction which is effectively an unreference to any memory address. +#[inline] +pub unsafe fn hlv_d(src: *const i64) -> i64 { + let value: i64; + asm!(".insn i 0x73, 0x4, {}, {}, 0x6C0", out(reg) value, in(reg) src, options(readonly, nostack)); + value +} + +/// Stores virtual machine memory by double integer +/// +/// This instruction performs an explicit memory access as though `V=1`; +/// i.e., with the address translation and protection, and the endianness, that apply to memory +/// accesses in either VS-mode or VU-mode. +/// +/// This function is unsafe for it accesses the virtual supervisor or user via a `HSV.D` +/// instruction which is effectively an unreference to any memory address. +#[inline] +pub unsafe fn hsv_d(dst: *mut i64, src: i64) { + asm!(".insn r 0x73, 0x4, 0x37, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack)); +} diff --git a/crates/core_arch/src/riscv_shared/mod.rs b/crates/core_arch/src/riscv_shared/mod.rs new file mode 100644 index 0000000000..a2c9cb2436 --- /dev/null +++ b/crates/core_arch/src/riscv_shared/mod.rs @@ -0,0 +1,481 @@ +//! Shared RISC-V intrinsics + +use crate::arch::asm; + +/// Generates the `PAUSE` instruction +/// +/// The PAUSE instruction is a HINT that indicates the current hart's rate of instruction retirement +/// should be temporarily reduced or paused. The duration of its effect must be bounded and may be zero. +#[inline] +pub fn pause() { + unsafe { asm!(".insn i 0x0F, 0, x0, x0, 0x010", options(nomem, nostack)) } +} + +/// Generates the `NOP` instruction +/// +/// The NOP instruction does not change any architecturally visible state, except for +/// advancing the `pc` and incrementing any applicable performance counters. 
+#[inline] +pub fn nop() { + unsafe { asm!("nop", options(nomem, nostack)) } +} + +/// Generates the `WFI` instruction +/// +/// The WFI instruction provides a hint to the implementation that the current hart can be stalled +/// until an interrupt might need servicing. This instruction is a hint, +/// and a legal implementation is to simply implement WFI as a NOP. +#[inline] +pub unsafe fn wfi() { + asm!("wfi", options(nomem, nostack)) +} + +/// Generates the `FENCE.I` instruction +/// +/// A FENCE.I instruction ensures that a subsequent instruction fetch on a RISC-V hart will see +/// any previous data stores already visible to the same RISC-V hart. +/// +/// FENCE.I does not ensure that other RISC-V harts' instruction fetches will observe the +/// local hart's stores in a multiprocessor system. +#[inline] +pub unsafe fn fence_i() { + asm!("fence.i", options(nostack)) +} + +/// Supervisor memory management fence for given virtual address and address space +/// +/// The fence orders only reads and writes made to leaf page table entries corresponding to +/// the virtual address in parameter `vaddr`, for the address space identified by integer parameter +/// `asid`. Accesses to global mappings are not ordered. The fence also invalidates all +/// address-translation cache entries that contain leaf page table entries corresponding to the +/// virtual address in parameter `vaddr` and that match the address space identified by integer +/// parameter `asid`, except for entries containing global mappings. +#[inline] +pub unsafe fn sfence_vma(vaddr: usize, asid: usize) { + asm!("sfence.vma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) +} + +/// Supervisor memory management fence for given virtual address +/// +/// The fence orders only reads and writes made to leaf page table entries corresponding to +/// the virtual address in parameter `vaddr`, for all address spaces. +/// The fence also invalidates all address-translation cache entries that contain leaf page +/// table entries corresponding to the virtual address in parameter `vaddr`, for all address spaces. +#[inline] +pub unsafe fn sfence_vma_vaddr(vaddr: usize) { + asm!("sfence.vma {}, x0", in(reg) vaddr, options(nostack)) +} + +/// Supervisor memory management fence for given address space +/// +/// The fence orders all reads and writes made to any level of the page tables, +/// but only for the address space identified by integer parameter `asid`. +/// +/// Accesses to global mappings are not ordered. The fence also invalidates all +/// address-translation cache entries matching the address space identified by integer +/// parameter `asid`, except for entries containing global mappings. +#[inline] +pub unsafe fn sfence_vma_asid(asid: usize) { + asm!("sfence.vma x0, {}", in(reg) asid, options(nostack)) +} + +/// Supervisor memory management fence for all address spaces and virtual addresses +/// +/// The fence orders all reads and writes made to any level of the page +/// tables, for all address spaces. The fence also invalidates all address-translation cache entries, +/// for all address spaces. +#[inline] +pub unsafe fn sfence_vma_all() { + asm!("sfence.vma", options(nostack)) +} + +/// Invalidate supervisor translation cache for given virtual address and address space +/// +/// This instruction invalidates any address-translation cache entries that an +/// `SFENCE.VMA` instruction with the same values of `vaddr` and `asid` would invalidate. 
+#[inline] +pub unsafe fn sinval_vma(vaddr: usize, asid: usize) { + // asm!("sinval.vma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) + asm!(".insn r 0x73, 0, 0x0B, x0, {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) +} + +/// Invalidate supervisor translation cache for given virtual address +/// +/// This instruction invalidates any address-translation cache entries that an +/// `SFENCE.VMA` instruction with the same values of `vaddr` and `asid` would invalidate. +#[inline] +pub unsafe fn sinval_vma_vaddr(vaddr: usize) { + asm!(".insn r 0x73, 0, 0x0B, x0, {}, x0", in(reg) vaddr, options(nostack)) +} + +/// Invalidate supervisor translation cache for given address space +/// +/// This instruction invalidates any address-translation cache entries that an +/// `SFENCE.VMA` instruction with the same values of `vaddr` and `asid` would invalidate. +#[inline] +pub unsafe fn sinval_vma_asid(asid: usize) { + asm!(".insn r 0x73, 0, 0x0B, x0, x0, {}", in(reg) asid, options(nostack)) +} + +/// Invalidate supervisor translation cache for all address spaces and virtual addresses +/// +/// This instruction invalidates any address-translation cache entries that an +/// `SFENCE.VMA` instruction with the same values of `vaddr` and `asid` would invalidate. +#[inline] +pub unsafe fn sinval_vma_all() { + asm!(".insn r 0x73, 0, 0x0B, x0, x0, x0", options(nostack)) +} + +/// Generates the `SFENCE.W.INVAL` instruction +/// +/// This instruction guarantees that any previous stores already visible to the current RISC-V hart +/// are ordered before subsequent `SINVAL.VMA` instructions executed by the same hart. +#[inline] +pub unsafe fn sfence_w_inval() { + // asm!("sfence.w.inval", options(nostack)) + asm!(".insn i 0x73, 0, x0, x0, 0x180", options(nostack)) +} + +/// Generates the `SFENCE.INVAL.IR` instruction +/// +/// This instruction guarantees that any previous SINVAL.VMA instructions executed by the current hart +/// are ordered before subsequent implicit references by that hart to the memory-management data structures. +#[inline] +pub unsafe fn sfence_inval_ir() { + // asm!("sfence.inval.ir", options(nostack)) + asm!(".insn i 0x73, 0, x0, x0, 0x181", options(nostack)) +} + +/// Loads virtual machine memory by signed byte integer +/// +/// This instruction performs an explicit memory access as though `V=1`; +/// i.e., with the address translation and protection, and the endianness, that apply to memory +/// accesses in either VS-mode or VU-mode. +/// +/// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.B` +/// instruction which is effectively an unreference to any memory address. +#[inline] +pub unsafe fn hlv_b(src: *const i8) -> i8 { + let value: i8; + asm!(".insn i 0x73, 0x4, {}, {}, 0x600", out(reg) value, in(reg) src, options(readonly, nostack)); + value +} + +/// Loads virtual machine memory by unsigned byte integer +/// +/// This instruction performs an explicit memory access as though `V=1`; +/// i.e., with the address translation and protection, and the endianness, that apply to memory +/// accesses in either VS-mode or VU-mode. +/// +/// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.BU` +/// instruction which is effectively an unreference to any memory address. 
+#[inline] +pub unsafe fn hlv_bu(src: *const u8) -> u8 { + let value: u8; + asm!(".insn i 0x73, 0x4, {}, {}, 0x601", out(reg) value, in(reg) src, options(readonly, nostack)); + value +} + +/// Loads virtual machine memory by signed half integer +/// +/// This instruction performs an explicit memory access as though `V=1`; +/// i.e., with the address translation and protection, and the endianness, that apply to memory +/// accesses in either VS-mode or VU-mode. +/// +/// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.H` +/// instruction which is effectively an unreference to any memory address. +#[inline] +pub unsafe fn hlv_h(src: *const i16) -> i16 { + let value: i16; + asm!(".insn i 0x73, 0x4, {}, {}, 0x640", out(reg) value, in(reg) src, options(readonly, nostack)); + value +} + +/// Loads virtual machine memory by unsigned half integer +/// +/// This instruction performs an explicit memory access as though `V=1`; +/// i.e., with the address translation and protection, and the endianness, that apply to memory +/// accesses in either VS-mode or VU-mode. +/// +/// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.HU` +/// instruction which is effectively an unreference to any memory address. +#[inline] +pub unsafe fn hlv_hu(src: *const u16) -> u16 { + let value: u16; + asm!(".insn i 0x73, 0x4, {}, {}, 0x641", out(reg) value, in(reg) src, options(readonly, nostack)); + value +} + +/// Accesses virtual machine instruction by unsigned half integer +/// +/// This instruction performs an explicit memory access as though `V=1`; +/// the memory being read must be executable in both stages of address translation, +/// but read permission is not required. +/// +/// This function is unsafe for it accesses the virtual supervisor or user via a `HLVX.HU` +/// instruction which is effectively an unreference to any memory address. +#[inline] +pub unsafe fn hlvx_hu(src: *const u16) -> u16 { + let insn: u16; + asm!(".insn i 0x73, 0x4, {}, {}, 0x643", out(reg) insn, in(reg) src, options(readonly, nostack)); + insn +} + +/// Loads virtual machine memory by signed word integer +/// +/// This instruction performs an explicit memory access as though `V=1`; +/// i.e., with the address translation and protection, and the endianness, that apply to memory +/// accesses in either VS-mode or VU-mode. +/// +/// This function is unsafe for it accesses the virtual supervisor or user via a `HLV.W` +/// instruction which is effectively an unreference to any memory address. +#[inline] +pub unsafe fn hlv_w(src: *const i32) -> i32 { + let value: i32; + asm!(".insn i 0x73, 0x4, {}, {}, 0x680", out(reg) value, in(reg) src, options(readonly, nostack)); + value +} + +/// Accesses virtual machine instruction by unsigned word integer +/// +/// This instruction performs an explicit memory access as though `V=1`; +/// the memory being read must be executable in both stages of address translation, +/// but read permission is not required. +/// +/// This function is unsafe for it accesses the virtual supervisor or user via a `HLVX.WU` +/// instruction which is effectively an unreference to any memory address. 
+#[inline] +pub unsafe fn hlvx_wu(src: *const u32) -> u32 { + let insn: u32; + asm!(".insn i 0x73, 0x4, {}, {}, 0x683", out(reg) insn, in(reg) src, options(readonly, nostack)); + insn +} + +/// Stores virtual machine memory by byte integer +/// +/// This instruction performs an explicit memory access as though `V=1`; +/// i.e., with the address translation and protection, and the endianness, that apply to memory +/// accesses in either VS-mode or VU-mode. +/// +/// This function is unsafe for it accesses the virtual supervisor or user via a `HSV.B` +/// instruction which is effectively an unreference to any memory address. +#[inline] +pub unsafe fn hsv_b(dst: *mut i8, src: i8) { + asm!(".insn r 0x73, 0x4, 0x31, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack)); +} + +/// Stores virtual machine memory by half integer +/// +/// This instruction performs an explicit memory access as though `V=1`; +/// i.e., with the address translation and protection, and the endianness, that apply to memory +/// accesses in either VS-mode or VU-mode. +/// +/// This function is unsafe for it accesses the virtual supervisor or user via a `HSV.H` +/// instruction which is effectively an unreference to any memory address. +#[inline] +pub unsafe fn hsv_h(dst: *mut i16, src: i16) { + asm!(".insn r 0x73, 0x4, 0x33, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack)); +} + +/// Stores virtual machine memory by word integer +/// +/// This instruction performs an explicit memory access as though `V=1`; +/// i.e., with the address translation and protection, and the endianness, that apply to memory +/// accesses in either VS-mode or VU-mode. +/// +/// This function is unsafe for it accesses the virtual supervisor or user via a `HSV.W` +/// instruction which is effectively an unreference to any memory address. +#[inline] +pub unsafe fn hsv_w(dst: *mut i32, src: i32) { + asm!(".insn r 0x73, 0x4, 0x35, x0, {}, {}", in(reg) dst, in(reg) src, options(nostack)); +} + +/// Hypervisor memory management fence for given guest virtual address and guest address space +/// +/// Guarantees that any previous stores already visible to the current hart are ordered before all +/// implicit reads by that hart done for VS-stage address translation for instructions that: +/// - are subsequent to the `HFENCE.VVMA`, and +/// - execute when `hgatp.VMID` has the same setting as it did when `HFENCE.VVMA` executed. +/// +/// This fence specifies a single guest virtual address, and a single guest address-space identifier. +#[inline] +pub unsafe fn hfence_vvma(vaddr: usize, asid: usize) { + // asm!("hfence.vvma {}, {}", in(reg) vaddr, in(reg) asid) + asm!(".insn r 0x73, 0, 0x11, x0, {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) +} + +/// Hypervisor memory management fence for given guest virtual address +/// +/// Guarantees that any previous stores already visible to the current hart are ordered before all +/// implicit reads by that hart done for VS-stage address translation for instructions that: +/// - are subsequent to the `HFENCE.VVMA`, and +/// - execute when `hgatp.VMID` has the same setting as it did when `HFENCE.VVMA` executed. +/// +/// This fence specifies a single guest virtual address. 
+#[inline] +pub unsafe fn hfence_vvma_vaddr(vaddr: usize) { + asm!(".insn r 0x73, 0, 0x11, x0, {}, x0", in(reg) vaddr, options(nostack)) +} + +/// Hypervisor memory management fence for given guest address space +/// +/// Guarantees that any previous stores already visible to the current hart are ordered before all +/// implicit reads by that hart done for VS-stage address translation for instructions that: +/// - are subsequent to the `HFENCE.VVMA`, and +/// - execute when `hgatp.VMID` has the same setting as it did when `HFENCE.VVMA` executed. +/// +/// This fence specifies a single guest address-space identifier. +#[inline] +pub unsafe fn hfence_vvma_asid(asid: usize) { + asm!(".insn r 0x73, 0, 0x11, x0, x0, {}", in(reg) asid, options(nostack)) +} + +/// Hypervisor memory management fence for all guest address spaces and guest virtual addresses +/// +/// Guarantees that any previous stores already visible to the current hart are ordered before all +/// implicit reads by that hart done for VS-stage address translation for instructions that: +/// - are subsequent to the `HFENCE.VVMA`, and +/// - execute when `hgatp.VMID` has the same setting as it did when `HFENCE.VVMA` executed. +/// +/// This fence applies to any guest address spaces and guest virtual addresses. +#[inline] +pub unsafe fn hfence_vvma_all() { + asm!(".insn r 0x73, 0, 0x11, x0, x0, x0", options(nostack)) +} + +/// Hypervisor memory management fence for guest physical address and virtual machine +/// +/// Guarantees that any previous stores already visible to the current hart are ordered before all implicit reads +/// by that hart done for G-stage address translation for instructions that follow the HFENCE.GVMA. +/// +/// This fence specifies a single guest physical address, **shifted right by 2 bits**, and a single virtual machine +/// by virtual machine identifier (VMID). +#[inline] +pub unsafe fn hfence_gvma(gaddr: usize, vmid: usize) { + // asm!("hfence.gvma {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack)) + asm!(".insn r 0x73, 0, 0x31, x0, {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack)) +} + +/// Hypervisor memory management fence for guest physical address +/// +/// Guarantees that any previous stores already visible to the current hart are ordered before all implicit reads +/// by that hart done for G-stage address translation for instructions that follow the HFENCE.GVMA. +/// +/// This fence specifies a single guest physical address; **the physical address should be shifted right by 2 bits**. +#[inline] +pub unsafe fn hfence_gvma_gaddr(gaddr: usize) { + asm!(".insn r 0x73, 0, 0x31, x0, {}, x0", in(reg) gaddr, options(nostack)) +} + +/// Hypervisor memory management fence for given virtual machine +/// +/// Guarantees that any previous stores already visible to the current hart are ordered before all implicit reads +/// by that hart done for G-stage address translation for instructions that follow the HFENCE.GVMA. +/// +/// This fence specifies a single virtual machine by virtual machine identifier (VMID). +#[inline] +pub unsafe fn hfence_gvma_vmid(vmid: usize) { + asm!(".insn r 0x73, 0, 0x31, x0, x0, {}", in(reg) vmid, options(nostack)) +} + +/// Hypervisor memory management fence for all virtual machines and guest physical addresses +/// +/// Guarantees that any previous stores already visible to the current hart are ordered before all implicit reads +/// by that hart done for G-stage address translation for instructions that follow the HFENCE.GVMA. 
+/// +/// This fence specifies all guest physical addresses and all virtual machines. +#[inline] +pub unsafe fn hfence_gvma_all() { + asm!(".insn r 0x73, 0, 0x31, x0, x0, x0", options(nostack)) +} + +/// Invalidate hypervisor translation cache for given guest virtual address and guest address space +/// +/// This instruction invalidates any address-translation cache entries that an +/// `HFENCE.VVMA` instruction with the same values of `vaddr` and `asid` would invalidate. +/// +/// This fence specifies a single guest virtual address, and a single guest address-space identifier. +#[inline] +pub unsafe fn hinval_vvma(vaddr: usize, asid: usize) { + // asm!("hinval.vvma {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) + asm!(".insn r 0x73, 0, 0x13, x0, {}, {}", in(reg) vaddr, in(reg) asid, options(nostack)) +} + +/// Invalidate hypervisor translation cache for given guest virtual address +/// +/// This instruction invalidates any address-translation cache entries that an +/// `HFENCE.VVMA` instruction with the same values of `vaddr` and `asid` would invalidate. +/// +/// This fence specifies a single guest virtual address. +#[inline] +pub unsafe fn hinval_vvma_vaddr(vaddr: usize) { + asm!(".insn r 0x73, 0, 0x13, x0, {}, x0", in(reg) vaddr, options(nostack)) +} + +/// Invalidate hypervisor translation cache for given guest address space +/// +/// This instruction invalidates any address-translation cache entries that an +/// `HFENCE.VVMA` instruction with the same values of `vaddr` and `asid` would invalidate. +/// +/// This fence specifies a single guest address-space identifier. +#[inline] +pub unsafe fn hinval_vvma_asid(asid: usize) { + asm!(".insn r 0x73, 0, 0x13, x0, x0, {}", in(reg) asid, options(nostack)) +} + +/// Invalidate hypervisor translation cache for all guest address spaces and guest virtual addresses +/// +/// This instruction invalidates any address-translation cache entries that an +/// `HFENCE.VVMA` instruction with the same values of `vaddr` and `asid` would invalidate. +/// +/// This fence applies to any guest address spaces and guest virtual addresses. +#[inline] +pub unsafe fn hinval_vvma_all() { + asm!(".insn r 0x73, 0, 0x13, x0, x0, x0", options(nostack)) +} + +/// Invalidate hypervisor translation cache for guest physical address and virtual machine +/// +/// This instruction invalidates any address-translation cache entries that an +/// `HFENCE.GVMA` instruction with the same values of `gaddr` and `vmid` would invalidate. +/// +/// This fence specifies a single guest physical address, **shifted right by 2 bits**, and a single virtual machine +/// by virtual machine identifier (VMID). +#[inline] +pub unsafe fn hinval_gvma(gaddr: usize, vmid: usize) { + // asm!("hinval.gvma {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack)) + asm!(".insn r 0x73, 0, 0x33, x0, {}, {}", in(reg) gaddr, in(reg) vmid, options(nostack)) +} + +/// Invalidate hypervisor translation cache for guest physical address +/// +/// This instruction invalidates any address-translation cache entries that an +/// `HFENCE.GVMA` instruction with the same values of `gaddr` and `vmid` would invalidate. +/// +/// This fence specifies a single guest physical address; **the physical address should be shifted right by 2 bits**. 
+#[inline] +pub unsafe fn hinval_gvma_gaddr(gaddr: usize) { + asm!(".insn r 0x73, 0, 0x33, x0, {}, x0", in(reg) gaddr, options(nostack)) +} + +/// Invalidate hypervisor translation cache for given virtual machine +/// +/// This instruction invalidates any address-translation cache entries that an +/// `HFENCE.GVMA` instruction with the same values of `gaddr` and `vmid` would invalidate. +/// +/// This fence specifies a single virtual machine by virtual machine identifier (VMID). +#[inline] +pub unsafe fn hinval_gvma_vmid(vmid: usize) { + asm!(".insn r 0x73, 0, 0x33, x0, x0, {}", in(reg) vmid, options(nostack)) +} + +/// Invalidate hypervisor translation cache for all virtual machines and guest physical addresses +/// +/// This instruction invalidates any address-translation cache entries that an +/// `HFENCE.GVMA` instruction with the same values of `gaddr` and `vmid` would invalidate. +/// +/// This fence specifies all guest physical addresses and all virtual machines. +#[inline] +pub unsafe fn hinval_gvma_all() { + asm!(".insn r 0x73, 0, 0x33, x0, x0, x0", options(nostack)) +}
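
A minimal usage sketch (illustrative only, not part of the patch): how a riscv64 kernel might call the shared intrinsics exported above, assuming a nightly toolchain with `#![feature(stdsimd)]`, S-mode privilege for the fences, and a hypothetical `AddressSpace` type standing in for real page-table code.

#![no_std]
#![feature(stdsimd)]
// Sketch of a riscv64 kernel module using the shared intrinsics; the
// `AddressSpace` type and the page-table update itself are hypothetical.
use core::arch::riscv64::{fence_i, pause, sfence_vma, wfi};
use core::sync::atomic::{AtomicBool, Ordering};

pub struct AddressSpace {
    asid: usize,
}

impl AddressSpace {
    /// After rewriting one leaf PTE, flush only the affected translation
    /// for this ASID instead of the whole TLB.
    pub unsafe fn remap_page(&mut self, vaddr: usize) {
        // ... write the new leaf page-table entry for `vaddr` here ...
        sfence_vma(vaddr, self.asid);
    }
}

/// After writing instructions into memory (a JIT or a module loader), make
/// them visible to this hart's instruction fetches.
pub unsafe fn publish_code() {
    fence_i();
}

/// Spin on a flag politely: PAUSE hints that instruction retirement may be
/// throttled while we wait.
pub fn spin_until(flag: &AtomicBool) {
    while !flag.load(Ordering::Acquire) {
        pause();
    }
}

/// Idle loop for a hart with nothing to run; WFI may legally retire as a
/// NOP, so the surrounding loop is still required.
pub unsafe fn idle() -> ! {
    loop {
        wfi();
    }
}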
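
The Svinval-style intrinsics are intended to be used as a bracketed batch. The sketch below (same assumptions as above, not from the patch) places the per-page SINVAL.VMA calls between SFENCE.W.INVAL and SFENCE.INVAL.IR so the batch as a whole orders like a single SFENCE.VMA; the 4 KiB page size is an assumption.

use core::arch::riscv64::{sfence_inval_ir, sfence_w_inval, sinval_vma};

/// Invalidate a contiguous range of pages for one address space after their
/// leaf PTEs have been rewritten.
pub unsafe fn invalidate_range(base_vaddr: usize, pages: usize, asid: usize) {
    // Order the earlier PTE stores before the fine-grained invalidations.
    sfence_w_inval();
    for i in 0..pages {
        sinval_vma(base_vaddr + i * 4096, asid);
    }
    // Order the invalidations before later implicit page-table reads.
    sfence_inval_ir();
}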
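
The hypervisor load/store intrinsics access guest memory through the guest's own translation (as though `V=1`). A rough HS-mode helper sketch, again illustrative rather than definitive: it assumes `hstatus`, `vsatp` and `hgatp` are already configured for the target guest and that the guest addresses are valid; fault handling and wider batched accesses are omitted.

use core::arch::riscv64::{hlv_bu, hlv_d, hsv_d};

/// Read a guest u64 located at a guest virtual address.
pub unsafe fn read_guest_u64(gva: usize) -> u64 {
    hlv_d(gva as *const i64) as u64
}

/// Write a u64 into guest memory at a guest virtual address.
pub unsafe fn write_guest_u64(gva: usize, value: u64) {
    hsv_d(gva as *mut i64, value as i64);
}

/// Copy bytes out of guest memory into a host buffer, one HLV.BU at a time
/// (simple but slow; real code would use word-sized accesses where aligned).
pub unsafe fn copy_from_guest(gva: usize, dst: &mut [u8]) {
    for (i, byte) in dst.iter_mut().enumerate() {
        *byte = hlv_bu((gva + i) as *const u8);
    }
}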