diff --git a/fuzzers/frida_gdiplus/Cargo.toml b/fuzzers/frida_gdiplus/Cargo.toml index e70b93eba8..39b8701ade 100644 --- a/fuzzers/frida_gdiplus/Cargo.toml +++ b/fuzzers/frida_gdiplus/Cargo.toml @@ -24,12 +24,15 @@ tar = "0.4.37" reqwest = { version = "0.11.4", features = ["blocking"] } [dependencies] -libafl = { path = "../../libafl/", features = [ "std", "llmp_compression", "llmp_bind_public", "frida_cli" ] } #, "llmp_small_maps", "llmp_debug"]} +libafl = { path = "../../libafl/", features = [ "std", "llmp_compression", + "llmp_bind_public", "frida_cli", "errors_backtrace" ] } #, "llmp_small_maps", "llmp_debug"]} libafl_bolts = { path = "../../libafl_bolts/" } frida-gum = { version = "0.13.6", features = [ "auto-download", "event-sink", "invocation-listener"] } libafl_frida = { path = "../../libafl_frida", features = ["cmplog"] } libafl_targets = { path = "../../libafl_targets", features = ["sancov_cmplog"] } libloading = "0.7" mimalloc = { version = "*", default-features = false } +dlmalloc ={version = "0.2.6", features = ["global"]} color-backtrace = "0.5" +env_logger = "0.10.0" iced-x86 = { version = "1.20.0", features = ["code_asm"] } diff --git a/fuzzers/frida_gdiplus/cargo/.config b/fuzzers/frida_gdiplus/cargo/.config new file mode 100644 index 0000000000..33806ae7fb --- /dev/null +++ b/fuzzers/frida_gdiplus/cargo/.config @@ -0,0 +1,2 @@ +[build] +target = "x86_64-pc-windows-msvc" diff --git a/fuzzers/frida_gdiplus/harness.cc b/fuzzers/frida_gdiplus/harness.cc index e6f9836f3b..7831fa7976 100644 --- a/fuzzers/frida_gdiplus/harness.cc +++ b/fuzzers/frida_gdiplus/harness.cc @@ -21,8 +21,13 @@ ULONG_PTR gdiplusToken; BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { switch (fdwReason) { case DLL_PROCESS_ATTACH: + LoadLibraryA("ole32.dll"); LoadLibraryA("gdi32full.dll"); LoadLibraryA("WindowsCodecs.dll"); + LoadLibraryA("shcore.dll"); + GdiplusStartup(&gdiplusToken, &gdiplusStartupInput, NULL); + LoadLibraryA("gdi32.dll"); + // 
DebugBreak(); break; } return TRUE; @@ -31,16 +36,16 @@ BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved) { extern "C" __declspec(dllexport) int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { static DWORD init = 0; - if (!init) { - GdiplusStartup(&gdiplusToken, &gdiplusStartupInput, NULL); - init = 1; - } + // if (!init) { + // init = 1; + // } HGLOBAL m_hBuffer = ::GlobalAlloc(GMEM_MOVEABLE, size); if (m_hBuffer) { void *pBuffer = ::GlobalLock(m_hBuffer); if (pBuffer) { - CopyMemory(pBuffer, data, size); + memcpy(pBuffer, data, size); + // CopyMemory(pBuffer, data, size); IStream *pStream = NULL; if (::CreateStreamOnHGlobal(m_hBuffer, FALSE, &pStream) == S_OK) { diff --git a/fuzzers/frida_gdiplus/src/fuzzer.rs b/fuzzers/frida_gdiplus/src/fuzzer.rs index de2b3399bb..52b4361463 100644 --- a/fuzzers/frida_gdiplus/src/fuzzer.rs +++ b/fuzzers/frida_gdiplus/src/fuzzer.rs @@ -6,9 +6,16 @@ //! going to make it compilable only for Windows, don't forget to modify the //! `scripts/test_fuzzer.sh` to opt-out this fuzzer from that test. 
+#[cfg(unix)] use mimalloc::MiMalloc; +#[cfg(unix)] #[global_allocator] static GLOBAL: MiMalloc = MiMalloc; +#[cfg(windows)] +use dlmalloc::GlobalDlmalloc; +#[cfg(windows)] +#[global_allocator] +static GLOBAL: GlobalDlmalloc = GlobalDlmalloc; use std::path::PathBuf; @@ -17,8 +24,8 @@ use libafl::{ corpus::{CachedOnDiskCorpus, Corpus, OnDiskCorpus}, events::{launcher::Launcher, llmp::LlmpRestartingEventManager, EventConfig}, executors::{inprocess::InProcessExecutor, ExitKind, ShadowExecutor}, - feedback_or, feedback_or_fast, - feedbacks::{CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, + feedback_and_fast, feedback_or, feedback_or_fast, + feedbacks::{ConstFeedback, CrashFeedback, MaxMapFeedback, TimeFeedback, TimeoutFeedback}, fuzzer::{Fuzzer, StdFuzzer}, inputs::{BytesInput, HasTargetBytes}, monitors::MultiMonitor, @@ -32,8 +39,6 @@ use libafl::{ state::{HasCorpus, StdState}, Error, HasMetadata, }; -#[cfg(unix)] -use libafl::{feedback_and_fast, feedbacks::ConstFeedback}; use libafl_bolts::{ cli::{parse_args, FuzzerOptions}, rands::StdRand, @@ -41,11 +46,11 @@ use libafl_bolts::{ tuples::{tuple_list, Merge}, AsSlice, }; -#[cfg(unix)] -use libafl_frida::asan::asan_rt::AsanRuntime; -#[cfg(unix)] -use libafl_frida::asan::errors::{AsanErrorsFeedback, AsanErrorsObserver}; use libafl_frida::{ + asan::{ + asan_rt::AsanRuntime, + errors::{AsanErrorsFeedback, AsanErrorsObserver}, + }, cmplog_rt::CmpLogRuntime, coverage_rt::{CoverageRuntime, MAP_SIZE}, executor::FridaInProcessExecutor, @@ -55,6 +60,7 @@ use libafl_targets::cmplog::CmpLogObserver; /// The main fn, usually parsing parameters, and starting the fuzzer pub fn main() { + env_logger::init(); color_backtrace::install(); let options = parse_args(); @@ -97,16 +103,11 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { let gum = Gum::obtain(); let coverage = CoverageRuntime::new(); - #[cfg(unix)] - let asan = AsanRuntime::new(options); + let asan = AsanRuntime::new(&options); - #[cfg(unix)] 
let mut frida_helper = FridaInstrumentationHelper::new(&gum, options, tuple_list!(coverage, asan)); - #[cfg(windows)] - let mut frida_helper = - FridaInstrumentationHelper::new(&gum, options, tuple_list!(coverage)); - + // // Create an observation channel using the coverage map let edges_observer = HitcountsMapObserver::new(StdMapObserver::from_mut_ptr( "edges", @@ -118,7 +119,6 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { // Create an observation channel to keep track of the execution time let time_observer = TimeObserver::new("time"); - #[cfg(unix)] let asan_observer = AsanErrorsObserver::from_static_asan_errors(); // Feedback to rate the interestingness of an input @@ -131,18 +131,15 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { ); // Feedbacks to recognize an input as solution - #[cfg(unix)] let mut objective = feedback_or_fast!( CrashFeedback::new(), - TimeoutFeedback::new(), + // TimeoutFeedback::new(), // true enables the AsanErrorFeedback feedback_and_fast!( ConstFeedback::from(true), AsanErrorsFeedback::new(&asan_observer) ) ); - #[cfg(windows)] - let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new()); // If not restarting, create a State from scratch let mut state = state.unwrap_or_else(|| { @@ -183,20 +180,18 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { // A fuzzer with feedbacks and a corpus scheduler let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); - #[cfg(unix)] - let observers = tuple_list!(edges_observer, time_observer, asan_observer); - #[cfg(windows)] - let observers = tuple_list!(edges_observer, time_observer); + let observers = tuple_list!(edges_observer, time_observer, asan_observer,); // Create the executor for an in-process function with just one observer for edge coverage let mut executor = FridaInProcessExecutor::new( &gum, - InProcessExecutor::new( + InProcessExecutor::with_timeout( &mut frida_harness, observers, &mut fuzzer, &mut 
state, &mut mgr, + options.timeout, )?, &mut frida_helper, ); @@ -237,7 +232,6 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { // Create an observation channel to keep track of the execution time let time_observer = TimeObserver::new("time"); - #[cfg(unix)] let asan_observer = AsanErrorsObserver::from_static_asan_errors(); // Feedback to rate the interestingness of an input @@ -249,7 +243,6 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { TimeFeedback::new(&time_observer) ); - #[cfg(unix)] let mut objective = feedback_or_fast!( CrashFeedback::new(), TimeoutFeedback::new(), @@ -258,8 +251,6 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { AsanErrorsFeedback::new(&asan_observer) ) ); - #[cfg(windows)] - let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new()); // If not restarting, create a State from scratch let mut state = state.unwrap_or_else(|| { @@ -301,10 +292,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { // A fuzzer with feedbacks and a corpus scheduler let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); - #[cfg(unix)] let observers = tuple_list!(edges_observer, time_observer, asan_observer); - #[cfg(windows)] - let observers = tuple_list!(edges_observer, time_observer,); // Create the executor for an in-process function with just one observer for edge coverage let mut executor = FridaInProcessExecutor::new( @@ -372,7 +360,6 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { // Create an observation channel to keep track of the execution time let time_observer = TimeObserver::new("time"); - #[cfg(unix)] let asan_observer = AsanErrorsObserver::from_static_asan_errors(); // Feedback to rate the interestingness of an input @@ -384,7 +371,6 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { TimeFeedback::new(&time_observer) ); - #[cfg(unix)] let mut objective = feedback_or_fast!( CrashFeedback::new(), 
TimeoutFeedback::new(), @@ -393,8 +379,6 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { AsanErrorsFeedback::new(&asan_observer) ) ); - #[cfg(windows)] - let mut objective = feedback_or_fast!(CrashFeedback::new(), TimeoutFeedback::new()); // If not restarting, create a State from scratch let mut state = state.unwrap_or_else(|| { @@ -436,20 +420,18 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { // A fuzzer with feedbacks and a corpus scheduler let mut fuzzer = StdFuzzer::new(scheduler, feedback, objective); - #[cfg(unix)] let observers = tuple_list!(edges_observer, time_observer, asan_observer); - #[cfg(windows)] - let observers = tuple_list!(edges_observer, time_observer); // Create the executor for an in-process function with just one observer for edge coverage let mut executor = FridaInProcessExecutor::new( &gum, - InProcessExecutor::new( + InProcessExecutor::with_timeout( &mut frida_harness, observers, &mut fuzzer, &mut state, &mut mgr, + options.timeout, )?, &mut frida_helper, ); @@ -466,7 +448,9 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { let mut stages = tuple_list!(StdMutationalStage::new(mutator)); - fuzzer.fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr)?; + fuzzer + .fuzz_loop(&mut stages, &mut executor, &mut state, &mut mgr) + .unwrap(); Ok(()) })(state, mgr, core_id) diff --git a/fuzzers/frida_libpng/Cargo.toml b/fuzzers/frida_libpng/Cargo.toml index 43d66572a2..8c9ce1850a 100644 --- a/fuzzers/frida_libpng/Cargo.toml +++ b/fuzzers/frida_libpng/Cargo.toml @@ -26,7 +26,8 @@ reqwest = { version = "0.11.4", features = ["blocking"] } [dependencies] -libafl = { path = "../../libafl/", features = [ "std", "llmp_compression", "llmp_bind_public", "frida_cli", "errors_backtrace" ] } #, "llmp_small_maps", "llmp_debug"]} +libafl = { path = "../../libafl/", features = [ "std", "llmp_compression", + "llmp_bind_public", "frida_cli", "errors_backtrace" ] } #, "llmp_small_maps", "llmp_debug"]} 
libafl_bolts = { path = "../../libafl_bolts/" } frida-gum = { version = "0.13.6", features = [ "auto-download", "event-sink", "invocation-listener"] } libafl_frida = { path = "../../libafl_frida", features = ["cmplog"] } @@ -34,3 +35,5 @@ libafl_targets = { path = "../../libafl_targets", features = ["sancov_cmplog"] } libloading = "0.7" mimalloc = { version = "*", default-features = false } color-backtrace = "0.5" +log = "0.4.20" +env_logger = "0.10.0" diff --git a/fuzzers/frida_libpng/harness.cc b/fuzzers/frida_libpng/harness.cc index 4c3a7b1aa3..6268a6c8da 100644 --- a/fuzzers/frida_libpng/harness.cc +++ b/fuzzers/frida_libpng/harness.cc @@ -88,7 +88,7 @@ static char *allocation = NULL; __attribute__((noinline)) void func3(char *alloc) { // printf("func3\n"); #ifdef _WIN32 - if (rand() == 0) { + if ((rand() % 2) == 0) { alloc[0x1ff] = 0xde; printf("alloc[0x200]: %d\n", alloc[0x200]); } diff --git a/fuzzers/frida_libpng/src/fuzzer.rs b/fuzzers/frida_libpng/src/fuzzer.rs index b469b46aa7..63c8453767 100644 --- a/fuzzers/frida_libpng/src/fuzzer.rs +++ b/fuzzers/frida_libpng/src/fuzzer.rs @@ -50,8 +50,8 @@ static GLOBAL: MiMalloc = MiMalloc; /// The main fn, usually parsing parameters, and starting the fuzzer pub fn main() { + env_logger::init(); color_backtrace::install(); - let options = parse_args(); unsafe { @@ -65,6 +65,8 @@ pub fn main() { /// The actual fuzzer #[allow(clippy::too_many_lines, clippy::too_many_arguments)] unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { + log::info!("Frida fuzzer starting up."); + // 'While the stats are state, they are usually used in the broker - which is likely never restarted let monitor = MultiMonitor::new(|s| println!("{s}")); @@ -97,7 +99,7 @@ unsafe fn fuzz(options: &FuzzerOptions) -> Result<(), Error> { #[cfg(unix)] let mut frida_helper = - FridaInstrumentationHelper::new(&gum, options, tuple_list!(coverage, asan)); + FridaInstrumentationHelper::new(&gum, options, tuple_list!(asan, coverage)); 
#[cfg(windows)] let mut frida_helper = FridaInstrumentationHelper::new(&gum, &options, tuple_list!(coverage)); diff --git a/libafl/src/executors/hooks/windows.rs b/libafl/src/executors/hooks/windows.rs index 660661e4ff..d771f1d0ad 100644 --- a/libafl/src/executors/hooks/windows.rs +++ b/libafl/src/executors/hooks/windows.rs @@ -113,6 +113,8 @@ pub mod windows_exception_handler { sync::atomic::{compiler_fence, Ordering}, }; #[cfg(feature = "std")] + use std::io::Write; + #[cfg(feature = "std")] use std::panic; use libafl_bolts::os::windows_exceptions::{ @@ -131,7 +133,7 @@ pub mod windows_exception_handler { }, feedbacks::Feedback, fuzzer::HasObjective, - inputs::UsesInput, + inputs::{Input, UsesInput}, state::{HasCorpus, HasExecutions, HasSolutions, State}, }; @@ -394,7 +396,17 @@ pub mod windows_exception_handler { // Make sure we don't crash in the crash handler forever. if is_crash { let input = data.take_current_input::<::Input>(); - + { + let mut bsod = Vec::new(); + { + let mut writer = std::io::BufWriter::new(&mut bsod); + writeln!(writer, "input: {:?}", input.generate_name(0)).unwrap(); + libafl_bolts::minibsod::generate_minibsod(&mut writer, exception_pointers) + .unwrap(); + writer.flush().unwrap(); + } + log::error!("{}", std::str::from_utf8(&bsod).unwrap()); + } run_observers_and_save_state::( executor, state, diff --git a/libafl_bolts/src/lib.rs b/libafl_bolts/src/lib.rs index 98ae33be5d..d563a6b29d 100644 --- a/libafl_bolts/src/lib.rs +++ b/libafl_bolts/src/lib.rs @@ -122,7 +122,7 @@ pub mod fs; #[cfg(feature = "alloc")] pub mod llmp; pub mod math; -#[cfg(all(feature = "std", unix))] +#[cfg(feature = "std")] pub mod minibsod; pub mod os; #[cfg(feature = "alloc")] diff --git a/libafl_bolts/src/minibsod.rs b/libafl_bolts/src/minibsod.rs index 1e5e3eb423..4b4b4f2166 100644 --- a/libafl_bolts/src/minibsod.rs +++ b/libafl_bolts/src/minibsod.rs @@ -1,7 +1,5 @@ //! Implements a mini-bsod generator. //! 
It dumps all important registers and prints a stacktrace. -//! You may use the [`crate::os::unix_signals::ucontext`] -//! function to get a [`ucontext_t`]. #[cfg(target_vendor = "apple")] use core::mem::size_of; @@ -9,6 +7,7 @@ use std::io::{BufWriter, Write}; #[cfg(any(target_os = "solaris", target_os = "illumos"))] use std::process::Command; +#[cfg(unix)] use libc::siginfo_t; #[cfg(target_vendor = "apple")] use mach::{ @@ -19,7 +18,10 @@ use mach::{ vm_region::{vm_region_recurse_info_t, vm_region_submap_info_64}, vm_types::{mach_vm_address_t, mach_vm_size_t, natural_t}, }; +#[cfg(windows)] +use windows::Win32::System::Diagnostics::Debug::{CONTEXT, EXCEPTION_POINTERS}; +#[cfg(unix)] use crate::os::unix_signals::{ucontext_t, Signal}; /// Write the content of all important registers @@ -391,7 +393,7 @@ pub fn dump_registers( write!(writer, "cs : {:#016x}, ", ucontext.sc_cs)?; Ok(()) } -/// + /// Write the content of all important registers #[cfg(all(target_os = "openbsd", target_arch = "aarch64"))] #[allow(clippy::similar_names)] @@ -452,6 +454,34 @@ pub fn dump_registers( } /// Write the content of all important registers +#[cfg(windows)] +#[allow(clippy::similar_names)] +pub fn dump_registers( + writer: &mut BufWriter, + context: &CONTEXT, +) -> Result<(), std::io::Error> { + write!(writer, "r8 : {:#016x}, ", context.R8)?; + write!(writer, "r9 : {:#016x}, ", context.R9)?; + write!(writer, "r10: {:#016x}, ", context.R10)?; + writeln!(writer, "r11: {:#016x}, ", context.R11)?; + write!(writer, "r12: {:#016x}, ", context.R12)?; + write!(writer, "r13: {:#016x}, ", context.R13)?; + write!(writer, "r14: {:#016x}, ", context.R14)?; + writeln!(writer, "r15: {:#016x}, ", context.R15)?; + write!(writer, "rdi: {:#016x}, ", context.Rdi)?; + write!(writer, "rsi: {:#016x}, ", context.Rsi)?; + write!(writer, "rbp: {:#016x}, ", context.Rbp)?; + writeln!(writer, "rbx: {:#016x}, ", context.Rbx)?; + write!(writer, "rdx: {:#016x}, ", context.Rdx)?; + write!(writer, "rax: {:#016x}, ", 
context.Rax)?; + write!(writer, "rcx: {:#016x}, ", context.Rcx)?; + writeln!(writer, "rsp: {:#016x}, ", context.Rsp)?; + write!(writer, "rip: {:#016x}, ", context.Rip)?; + writeln!(writer, "efl: {:#016x}, ", context.EFlags)?; + + Ok(()) +} + #[cfg(all(target_os = "haiku", target_arch = "x86_64"))] #[allow(clippy::similar_names)] pub fn dump_registers( @@ -489,6 +519,7 @@ pub fn dump_registers( target_os = "dragonfly", target_os = "netbsd", target_os = "openbsd", + windows, target_os = "haiku", any(target_os = "solaris", target_os = "illumos"), )))] @@ -751,6 +782,7 @@ fn write_crash( target_os = "dragonfly", target_os = "openbsd", target_os = "netbsd", + windows, target_os = "haiku", any(target_os = "solaris", target_os = "illumos"), )))] @@ -765,6 +797,33 @@ fn write_crash( Ok(()) } +#[cfg(windows)] +fn write_crash( + writer: &mut BufWriter, + exception_pointers: *mut EXCEPTION_POINTERS, +) -> Result<(), std::io::Error> { + // TODO add fault addr for other platforms. + unsafe { + writeln!( + writer, + "Received exception {:0x} at address {:x}", + (*exception_pointers) + .ExceptionRecord + .as_mut() + .unwrap() + .ExceptionCode + .0, + (*exception_pointers) + .ExceptionRecord + .as_mut() + .unwrap() + .ExceptionAddress as usize + ) + }?; + + Ok(()) +} + #[cfg(any(target_os = "linux", target_os = "android"))] fn write_minibsod(writer: &mut BufWriter) -> Result<(), std::io::Error> { match std::fs::read_to_string("/proc/self/maps") { @@ -1023,6 +1082,30 @@ pub fn generate_minibsod( write_minibsod(writer) } +/// Generates a mini-BSOD given an `EXCEPTION_POINTERS` structure. 
+#[cfg(windows)] +#[allow( + clippy::non_ascii_literal, + clippy::too_many_lines, + clippy::not_unsafe_ptr_arg_deref +)] +pub fn generate_minibsod( + writer: &mut BufWriter, + exception_pointers: *mut EXCEPTION_POINTERS, +) -> Result<(), std::io::Error> { + writeln!(writer, "{:━^100}", " CRASH ")?; + write_crash(writer, exception_pointers)?; + writeln!(writer, "{:━^100}", " REGISTERS ")?; + dump_registers(writer, unsafe { + (*exception_pointers).ContextRecord.as_mut().unwrap() + })?; + writeln!(writer, "{:━^100}", " BACKTRACE ")?; + writeln!(writer, "{:?}", backtrace::Backtrace::new())?; + writeln!(writer, "{:━^100}", " MAPS ")?; + write_minibsod(writer) +} + +#[cfg(unix)] #[cfg(test)] mod tests { diff --git a/libafl_bolts/src/os/windows_exceptions.rs b/libafl_bolts/src/os/windows_exceptions.rs index b7dca5952c..04b8d2cc12 100644 --- a/libafl_bolts/src/os/windows_exceptions.rs +++ b/libafl_bolts/src/os/windows_exceptions.rs @@ -35,7 +35,7 @@ const EXCEPTION_CONTINUE_EXECUTION: c_long = -1; const EXCEPTION_CONTINUE_SEARCH: c_long = 0; // For SEH -//const EXCEPTION_EXECUTE_HANDLER: c_long = 1; +// const EXCEPTION_EXECUTE_HANDLER: c_long = 1; // From https://github.com/Alexpux/mingw-w64/blob/master/mingw-w64-headers/crt/signal.h pub const SIGINT: i32 = 2; diff --git a/libafl_frida/Cargo.toml b/libafl_frida/Cargo.toml index b50cb62464..fb06651e3c 100644 --- a/libafl_frida/Cargo.toml +++ b/libafl_frida/Cargo.toml @@ -80,9 +80,13 @@ ahash = "0.8" paste = "1.0" log = "0.4.20" mmap-rs = "0.6.0" - +bit_reverse = "0.1.8" yaxpeax-arch = "0.2.7" +[target.'cfg(windows)'.dependencies] +winsafe = {version = "0.0.18", features = ["kernel"]} + + [dev-dependencies] serial_test = { version = "3", default-features = false, features = ["logging"] } clap = {version = "4.5", features = ["derive"]} diff --git a/libafl_frida/build.rs b/libafl_frida/build.rs index 5d23b239fb..3d54b87a5e 100644 --- a/libafl_frida/build.rs +++ b/libafl_frida/build.rs @@ -7,34 +7,59 @@ fn main() { 
cc::Build::new().file("src/gettls.c").compile("libgettls.a"); } + let target_family = std::env::var("CARGO_CFG_TARGET_FAMILY").unwrap(); // Force linking against libc++ - #[cfg(unix)] - println!("cargo:rustc-link-lib=dylib=c++"); + if target_family == "unix" { + println!("cargo:rustc-link-lib=dylib=c++"); + } + println!("cargo:rerun-if-changed=build.rs"); + println!("cargo:rerun-if-changed=test_harness.cpp"); + println!("cargo:rerun-if-changed=src/gettls.c"); // Build the test harness // clang++ -shared -fPIC -O0 -o test_harness.so test_harness.cpp - #[cfg(unix)] - { - // Check if we have clang++ installed - let clangpp = std::process::Command::new("clang++") - .arg("--version") - .output(); + // Check if we have clang++ installed + + if target_family == "windows" { + let compiler = cc::Build::new() + .cpp(true) + .file("test_harness.a") + .get_compiler(); + let mut cmd = std::process::Command::new(compiler.path()); + let cmd = cmd + .args(compiler.args()) + .arg("test_harness.cpp") + .arg("/link"); - match clangpp { - Ok(_) => { - std::process::Command::new("clang++") - .arg("-shared") - .arg("-fPIC") - .arg("-O0") - .arg("-o") - .arg("test_harness.so") - .arg("test_harness.cpp") - .status() - .expect("Failed to build test harness"); - } - Err(_) => { - println!("cargo:warning=clang++ not found, skipping test harness build"); - } - } + #[cfg(unix)] + let cmd = cmd + .arg(format!( + "/libpath:{}/.cache/cargo-xwin/xwin/crt/lib/x86_64/", + std::env::var("HOME").unwrap() + )) + .arg(format!( + "/libpath:{}/.cache/cargo-xwin/xwin/sdk/lib/ucrt/x86_64/", + std::env::var("HOME").unwrap() + )) + .arg(format!( + "/libpath:{}/.cache/cargo-xwin/xwin/sdk/lib/um/x86_64/", + std::env::var("HOME").unwrap() + )); + cmd.arg("/dll").arg("/OUT:test_harness.dll"); + cmd.status().expect("Failed to link test_harness.dll"); + } else { + let compiler = cc::Build::new() + .cpp(true) + .opt_level(0) + .shared_flag(true) + .get_compiler(); + let clangpp = compiler.path(); + let mut cmd = 
std::process::Command::new(clangpp); + cmd.args(compiler.args()) + .arg("test_harness.cpp") + .arg("-o") + .arg("test_harness.so") + .status() + .expect("Failed to link test_harness"); } } diff --git a/libafl_frida/src/alloc.rs b/libafl_frida/src/alloc.rs index 6ce806ba60..adc18b9834 100644 --- a/libafl_frida/src/alloc.rs +++ b/libafl_frida/src/alloc.rs @@ -1,4 +1,5 @@ #[cfg(any( + windows, target_os = "linux", target_vendor = "apple", all( @@ -13,6 +14,7 @@ use frida_gum::{PageProtection, RangeDetails}; use hashbrown::HashMap; use libafl_bolts::cli::FuzzerOptions; #[cfg(any( + windows, target_os = "linux", target_vendor = "apple", all( @@ -20,8 +22,7 @@ use libafl_bolts::cli::FuzzerOptions; target_os = "android" ) ))] -use mmap_rs::{MemoryAreas, MmapFlags, MmapMut, MmapOptions, ReservedMut}; -use nix::libc::memset; +use mmap_rs::{MmapFlags, MmapMut, MmapOptions, ReservedMut}; use rangemap::RangeSet; use serde::{Deserialize, Serialize}; @@ -41,7 +42,9 @@ pub struct Allocator { /// The shadow bit shadow_bit: usize, /// The reserved (pre-allocated) shadow mapping - pre_allocated_shadow_mappings: HashMap<(usize, usize), ReservedMut>, + pre_allocated_shadow_mappings: Vec, + /// Whether we've pre allocated a shadow mapping: + using_pre_allocated_shadow_mapping: bool, /// All tracked allocations allocations: HashMap, /// All mappings @@ -157,7 +160,6 @@ impl Allocator { pub unsafe fn alloc(&mut self, size: usize, _alignment: usize) -> *mut c_void { let mut is_malloc_zero = false; let size = if size == 0 { - // log::warn!("zero-sized allocation!"); is_malloc_zero = true; 16 } else { @@ -179,7 +181,6 @@ impl Allocator { self.total_allocation_size += rounded_up_size; let metadata = if let Some(mut metadata) = self.find_smallest_fit(rounded_up_size) { - //log::trace!("reusing allocation at {:x}, (actual mapping starts at {:x}) size {:x}", metadata.address, metadata.address - self.page_size, size); metadata.is_malloc_zero = is_malloc_zero; metadata.size = size; if 
self.allocation_backtraces { @@ -234,14 +235,13 @@ impl Allocator { let address = (metadata.address + self.page_size) as *mut c_void; self.allocations.insert(address as usize, metadata); - // log::trace!("serving address: {:?}, size: {:x}", address, size); + log::trace!("serving address: {:?}, size: {:x}", address, size); address } /// Releases the allocation at the given address. #[allow(clippy::missing_safety_doc)] pub unsafe fn release(&mut self, ptr: *mut c_void) { - //log::trace!("freeing address: {:?}", ptr); let Some(metadata) = self.allocations.get_mut(&(ptr as usize)) else { if !ptr.is_null() { AsanErrors::get_mut_blocking() @@ -339,34 +339,30 @@ impl Allocator { } fn unpoison(start: usize, size: usize) { - // log::trace!("unpoisoning {:x} for {:x}", start, size / 8 + 1); unsafe { - // log::trace!("memset: {:?}", start as *mut c_void); - memset(start as *mut c_void, 0xff, size / 8); + std::slice::from_raw_parts_mut(start as *mut u8, size / 8).fill(0xff); let remainder = size % 8; if remainder > 0 { - // log::trace!("remainder: {:x}, offset: {:x}", remainder, start + size / 8); - memset( - (start + size / 8) as *mut c_void, - (0xff << (8 - remainder)) & 0xff, - 1, - ); + let mut current_value = ((start + size / 8) as *const u8).read(); + current_value |= 0xff << (8 - remainder); + ((start + size / 8) as *mut u8).write(current_value); } } } /// Poisonn an area in memory pub fn poison(start: usize, size: usize) { - // log::trace!("poisoning {:x} for {:x}", start, size / 8 + 1); unsafe { - // log::trace!("memset: {:?}", start as *mut c_void); - memset(start as *mut c_void, 0x00, size / 8); + std::slice::from_raw_parts_mut(start as *mut u8, size / 8).fill(0x0); let remainder = size % 8; if remainder > 0 { - // log::trace!("remainder: {:x}, offset: {:x}", remainder, start + size / 8); - memset((start + size / 8) as *mut c_void, 0x00, 1); + let mask = !(0xff << (8 - remainder)); + let mut current_value = ((start + size / 8) as *const u8).read(); + + current_value 
&= mask; + ((start + size / 8) as *mut u8).write(current_value); } } } @@ -381,87 +377,154 @@ impl Allocator { let shadow_mapping_start = map_to_shadow!(self, start); let shadow_start = self.round_down_to_page(shadow_mapping_start); - // I'm not sure this works as planned. The same address appearing as start and end is mapped to - // different addresses. - let shadow_end = self.round_up_to_page((end - start) / 8) + self.page_size + shadow_start; - log::trace!( - "map_shadow_for_region start: {:x}, end {:x}, size {:x}, shadow {:x}-{:x}", - start, - end, - end - start, - shadow_start, - shadow_end - ); - if self.pre_allocated_shadow_mappings.is_empty() { - for range in self.shadow_pages.gaps(&(shadow_start..shadow_end)) { - /* - log::trace!( - "range: {:x}-{:x}, pagesize: {}", - range.start, range.end, self.page_size - ); - */ - let mapping = MmapOptions::new(range.end - range.start - 1) - .unwrap() - .with_address(range.start) - .map_mut() - .expect("An error occurred while mapping shadow memory"); - - self.mappings.insert(range.start, mapping); - } - - log::trace!("adding shadow pages {:x} - {:x}", shadow_start, shadow_end); - self.shadow_pages.insert(shadow_start..shadow_end); - } else { - let mut new_shadow_mappings = Vec::new(); + let shadow_end = self.round_up_to_page((end - start) / 8 + self.page_size + shadow_start); + if self.using_pre_allocated_shadow_mapping { + let mut newly_committed_regions = Vec::new(); for gap in self.shadow_pages.gaps(&(shadow_start..shadow_end)) { - for ((pa_start, pa_end), shadow_mapping) in &mut self.pre_allocated_shadow_mappings - { - if *pa_start <= gap.start && gap.start < *pa_start + shadow_mapping.len() { - log::trace!("pa_start: {:x}, pa_end {:x}, gap.start {:x}, shadow_mapping.ptr {:x}, shadow_mapping.len {:x}", - *pa_start, *pa_end, gap.start, shadow_mapping.as_ptr() as usize, shadow_mapping.len()); - - // Split the preallocated mapping into two parts, keeping the - // part before the gap and returning the part starting 
with the gap as a new mapping - let mut start_mapping = - shadow_mapping.split_off(gap.start - *pa_start).unwrap(); - - // Split the new mapping into two parts, - // keeping the part holding the gap and returning the part starting after the gap as a new mapping - let end_mapping = start_mapping.split_off(gap.end - gap.start).unwrap(); - - //Push the new after-the-gap mapping to the list of mappings to be added - new_shadow_mappings.push(((gap.end, *pa_end), end_mapping)); - - // Insert the new gap mapping into the list of mappings - self.mappings - .insert(gap.start, start_mapping.try_into().unwrap()); - + let mut new_reserved_region = None; + for reserved in &mut self.pre_allocated_shadow_mappings { + if gap.start >= reserved.start() && gap.end <= reserved.end() { + let mut to_be_commited = + reserved.split_off(gap.start - reserved.start()).unwrap(); + + if to_be_commited.end() > gap.end { + let upper = to_be_commited + .split_off(gap.end - to_be_commited.start()) + .unwrap(); + new_reserved_region = Some(upper); + } + let commited: MmapMut = to_be_commited + .try_into() + .expect("Failed to commit reserved shadow memory"); + newly_committed_regions.push(commited); break; } } - } - for new_shadow_mapping in new_shadow_mappings { - log::trace!( - "adding pre_allocated_shadow_mappings and shadow pages {:x} - {:x}", - new_shadow_mapping.0 .0, - new_shadow_mapping.0 .1 - ); - self.pre_allocated_shadow_mappings - .insert(new_shadow_mapping.0, new_shadow_mapping.1); + if let Some(new_reserved_region) = new_reserved_region { + self.pre_allocated_shadow_mappings.push(new_reserved_region); + } + } + for newly_committed_region in newly_committed_regions { self.shadow_pages - .insert(new_shadow_mapping.0 .0..new_shadow_mapping.0 .1); + .insert(newly_committed_region.start()..newly_committed_region.end()); + self.mappings + .insert(newly_committed_region.start(), newly_committed_region); } } - // log::trace!("shadow_mapping_start: {:x}, shadow_size: {:x}", 
shadow_mapping_start, (end - start) / 8); if unpoison { Self::unpoison(shadow_mapping_start, end - start); } - (shadow_mapping_start, (end - start) / 8) + (shadow_mapping_start, (end - start) / 8 + 1) + } + + #[inline] + #[must_use] + fn check_shadow_aligned(&mut self, address: *const c_void, size: usize) -> bool { + assert_eq!( + (address as usize) & 7, + 0, + "check_shadow_aligned used when address is not aligned. Use check_shadow" + ); + assert_eq!( + size & 7, + 0, + "check_shadow_aligned used when size is not aligned. Use check_shadow" + ); + + if size == 0 { + return true; + } + + let shadow_addr = map_to_shadow!(self, (address as usize)); + let shadow_size = size >> 3; + let buf = unsafe { std::slice::from_raw_parts_mut(shadow_addr as *mut u8, shadow_size) }; + let (prefix, aligned, suffix) = unsafe { buf.align_to::() }; + if !prefix.iter().all(|&x| x == 0xff) + || !suffix.iter().all(|&x| x == 0xff) + || !aligned + .iter() + .all(|&x| x == 0xffffffffffffffffffffffffffffffffu128) + { + return false; + } + + true } + /// Checks whether the given address up till size is valid unpoisoned shadow memory. + /// TODO: check edge cases + #[inline] + #[must_use] + pub fn check_shadow(&mut self, address: *const c_void, size: usize) -> bool { + //the algorithm for check_shadow is as follows: + //1. we first check if its managed. if is not then exit + //2. we check if it is aligned. this should be 99% of accesses. If it is do an aligned check and leave + //3. if it is not split the check into 3 parts: the pre-aligment bytes, the aligned portion, and the post alignment posts + //3. The prealignment bytes are the unaligned bytes (if any) located in the qword preceding the aligned portion. Perform a specialied check to ensure that the bytes from [start, align(start, 8)) are valid. In this case align(start,8) aligns start to the next 8 byte boundary. + //4. The aligned check is where the address and the size is 8 byte aligned. Use check_shadow_aligned to check it + //5. 
The post-alignment is the same as pre-alignment except it is the qword following the aligned portion. Use a specialized check to ensure that [end & ~7, end) is valid. + + if size == 0 + /*|| !self.is_managed(address as *mut c_void)*/ + { + return true; + } + + if !self.is_managed(address as *mut c_void) { + log::trace!("unmanaged address to check_shadow: {:?}, {size:x}", address); + return true; + } + //fast path. most buffers are likely 8 byte aligned in size and address + if (address as usize).trailing_zeros() >= 3 && size.trailing_zeros() >= 3 { + return self.check_shadow_aligned(address, size); + } + + //slow path. check everything + let start_address = address as usize; + let end_address = start_address + size; + + //8 byte align the start/end so we can use check_shadow_aligned for the majority of it + //in the case of subqword accesses (i.e,, the entire access is located within 1 qword), aligned_start > aligned_end naturally + let aligned_start = (start_address + 7) & !7; + let aligned_end = end_address & !7; + + let start_offset = start_address & 7; + let end_offset = end_address & 7; + + //if the start is unaligned + if start_address != aligned_start { + let start_shadow = map_to_shadow!(self, start_address); + + let start_mask: u8 = 0xff << (8 - start_offset); + if unsafe { (start_shadow as *const u8).read() } & start_mask != start_mask { + return false; + } + } + + //if this is not true then it must be a subqword access as the start will be larger than the end + if aligned_start <= aligned_end { + if !self + .check_shadow_aligned(aligned_start as *const c_void, aligned_end - aligned_start) + { + return false; + } + + if end_address != aligned_end { + let end_shadow = map_to_shadow!(self, end_address); + + let end_mask = 0xff << (8 - end_offset); //we want to check from the beginning of the qword to the offset + if unsafe { (end_shadow as *const u8).read() } & end_mask != end_mask { + return false; + } + } + } + // self.map_shadow_for_region(address, 
address + size, false); + + true + } /// Maps the address to a shadow address #[inline] #[must_use] @@ -473,7 +536,7 @@ impl Allocator { #[inline] pub fn is_managed(&self, ptr: *mut c_void) -> bool { //self.allocations.contains_key(&(ptr as usize)) - self.base_mapping_addr <= ptr as usize && (ptr as usize) < self.current_mapping_addr + self.shadow_offset <= ptr as usize && (ptr as usize) < self.current_mapping_addr } /// Checks if any of the allocations has not been freed @@ -488,17 +551,19 @@ impl Allocator { /// Unpoison all the memory that is currently mapped with read/write permissions. pub fn unpoison_all_existing_memory(&mut self) { - RangeDetails::enumerate_with_prot(PageProtection::NoAccess, &mut |range: &RangeDetails| { - if range.protection() as u32 & PageProtection::ReadWrite as u32 != 0 { + RangeDetails::enumerate_with_prot( + PageProtection::Read, + &mut |range: &RangeDetails| -> bool { let start = range.memory_range().base_address().0 as usize; let end = start + range.memory_range().size(); - if !self.pre_allocated_shadow_mappings.is_empty() && start == 1 << self.shadow_bit { - return true; + + if !self.is_managed(start as *mut c_void) { + self.map_shadow_for_region(start, end, true); } - self.map_shadow_for_region(start, end, true); - } - true - }); + + true + }, + ); } /// Initialize the allocator, making sure a valid shadow bit is selected. @@ -512,57 +577,61 @@ impl Allocator { let mut occupied_ranges: Vec<(usize, usize)> = vec![]; // max(userspace address) this is usually 0x8_0000_0000_0000 - 1 on x64 linux. + #[cfg(unix)] let mut userspace_max: usize = 0; // Enumerate memory ranges that are already occupied. - for area in MemoryAreas::open(None).unwrap() { - let start = area.as_ref().unwrap().start(); - let end = area.unwrap().end(); - occupied_ranges.push((start, end)); - // log::trace!("Occupied {:x} {:x}", start, end); - let base: usize = 2; - // On x64, if end > 2**48, then that's in vsyscall or something. 
- #[cfg(all(unix, target_arch = "x86_64"))] - if end <= base.pow(48) && end > userspace_max { - userspace_max = end; - } - #[cfg(all(not(unix), target_arch = "x86_64"))] - if (end >> 3) <= base.pow(44) && (end >> 3) > userspace_max { - userspace_max = end >> 3; - } + RangeDetails::enumerate_with_prot( + PageProtection::Read, + &mut |range: &RangeDetails| -> bool { + let start = range.memory_range().base_address().0 as usize; + let end = start + range.memory_range().size(); + occupied_ranges.push((start, end)); + // On x64, if end > 2**48, then that's in vsyscall or something. + #[cfg(all(unix, target_arch = "x86_64"))] + if end <= 2_usize.pow(48) && end > userspace_max { + userspace_max = end; + } + // + // #[cfg(all(not(unix), target_arch = "x86_64"))] + // if end <= 2_usize.pow(64) && end > userspace_max { + // userspace_max = end; + // } + + // On aarch64, if end > 2**52, then range is not in userspace + #[cfg(target_arch = "aarch64")] + if end <= 2_usize.pow(52) && end > userspace_max { + userspace_max = end; + } - // On aarch64, if end > 2**52, then range is not in userspace - #[cfg(target_arch = "aarch64")] - if end <= base.pow(52) && end > userspace_max { - userspace_max = end; - } - } + true + }, + ); - let mut maxbit = 0; + #[cfg(unix)] + let mut maxbit = 63; + #[cfg(windows)] + let maxbit = 63; + #[cfg(unix)] for power in 1..64 { - let base: usize = 2; - if base.pow(power) > userspace_max { + if 2_usize.pow(power) > userspace_max { maxbit = power; break; } } { - for try_shadow_bit in &[maxbit, maxbit - 4, maxbit - 3, maxbit - 2] { + for try_shadow_bit in 44..maxbit { let addr: usize = 1 << try_shadow_bit; let shadow_start = addr; let shadow_end = addr + addr + addr; let mut good_candidate = true; // check if the proposed shadow bit overlaps with occupied ranges. 
for (start, end) in &occupied_ranges { - // log::trace!("{:x} {:x}, {:x} {:x} -> {:x} - {:x}", shadow_start, shadow_end, start, end, - // shadow_start + ((start >> 3) & ((1 << (try_shadow_bit + 1)) - 1)), - // shadow_start + ((end >> 3) & ((1 << (try_shadow_bit + 1)) - 1)) - // ); if (shadow_start <= *end) && (*start <= shadow_end) { log::trace!("{:x} {:x}, {:x} {:x}", shadow_start, shadow_end, start, end); - log::warn!("shadow_bit {try_shadow_bit:x} is not suitable"); + log::warn!("shadow_bit {try_shadow_bit:} is not suitable"); good_candidate = false; break; } @@ -573,7 +642,7 @@ impl Allocator { > shadow_end) { log::warn!( - "shadow_bit {try_shadow_bit:x} is not suitable (shadow out of range)" + "shadow_bit {try_shadow_bit:} is not suitable (shadow out of range)" ); good_candidate = false; break; @@ -582,33 +651,26 @@ impl Allocator { if good_candidate { // We reserve the shadow memory space of size addr*2, but don't commit it. - if let Ok(mapping) = MmapOptions::new(1 << (*try_shadow_bit + 1)) + if let Ok(mapping) = MmapOptions::new(1 << (try_shadow_bit + 1)) .unwrap() .with_flags(MmapFlags::NO_RESERVE) .with_address(addr) .reserve_mut() { - shadow_bit = (*try_shadow_bit).try_into().unwrap(); - - log::warn!("shadow_bit {shadow_bit:x} is suitable"); - log::trace!( - "adding pre_allocated_shadow_mappings {:x} - {:x} with size {:}", - addr, - (addr + (1 << (shadow_bit + 1))), - mapping.len() - ); + shadow_bit = (try_shadow_bit).try_into().unwrap(); - self.pre_allocated_shadow_mappings - .insert((addr, (addr + (1 << (shadow_bit + 1)))), mapping); + log::warn!("shadow_bit {shadow_bit:} is suitable"); + self.pre_allocated_shadow_mappings.push(mapping); + self.using_pre_allocated_shadow_mapping = true; break; } - log::warn!("shadow_bit {try_shadow_bit:x} is not suitable - failed to allocate shadow memory"); + log::warn!("shadow_bit {try_shadow_bit:} is not suitable - failed to allocate shadow memory"); } } } - // assert!(shadow_bit != 0); - // attempt to pre-map the 
entire shadow-memory space + log::warn!("shadow_bit: {shadow_bit}"); + assert!(shadow_bit != 0); let addr: usize = 1 << shadow_bit; @@ -643,7 +705,8 @@ impl Default for Allocator { max_total_allocation: 1 << 32, allocation_backtraces: false, page_size, - pre_allocated_shadow_mappings: HashMap::new(), + pre_allocated_shadow_mappings: Vec::new(), + using_pre_allocated_shadow_mapping: false, mappings: HashMap::new(), shadow_offset: 0, shadow_bit: 0, @@ -657,3 +720,61 @@ impl Default for Allocator { } } } + +#[test] +fn check_shadow() { + let mut allocator = Allocator::default(); + allocator.init(); + + let allocation = unsafe { allocator.alloc(8, 8) }; + assert!(!allocation.is_null()); + assert!(allocator.check_shadow(allocation, 1)); + assert!(allocator.check_shadow(allocation, 2)); + assert!(allocator.check_shadow(allocation, 3)); + assert!(allocator.check_shadow(allocation, 4)); + assert!(allocator.check_shadow(allocation, 5)); + assert!(allocator.check_shadow(allocation, 6)); + assert!(allocator.check_shadow(allocation, 7)); + assert!(allocator.check_shadow(allocation, 8)); + assert!(!allocator.check_shadow(allocation, 9)); + assert!(!allocator.check_shadow(allocation, 10)); + assert!(allocator.check_shadow(unsafe { allocation.offset(1) }, 7)); + assert!(allocator.check_shadow(unsafe { allocation.offset(2) }, 6)); + assert!(allocator.check_shadow(unsafe { allocation.offset(3) }, 5)); + assert!(allocator.check_shadow(unsafe { allocation.offset(4) }, 4)); + assert!(allocator.check_shadow(unsafe { allocation.offset(5) }, 3)); + assert!(allocator.check_shadow(unsafe { allocation.offset(6) }, 2)); + assert!(allocator.check_shadow(unsafe { allocation.offset(7) }, 1)); + assert!(allocator.check_shadow(unsafe { allocation.offset(8) }, 0)); + assert!(!allocator.check_shadow(unsafe { allocation.offset(9) }, 1)); + assert!(!allocator.check_shadow(unsafe { allocation.offset(9) }, 8)); + assert!(!allocator.check_shadow(unsafe { allocation.offset(1) }, 9)); + 
assert!(!allocator.check_shadow(unsafe { allocation.offset(1) }, 8)); + assert!(!allocator.check_shadow(unsafe { allocation.offset(2) }, 8)); + assert!(!allocator.check_shadow(unsafe { allocation.offset(3) }, 8)); + let allocation = unsafe { allocator.alloc(0xc, 0) }; + assert!(allocator.check_shadow(unsafe { allocation.offset(4) }, 8)); + //subqword access + assert!(allocator.check_shadow(unsafe { allocation.offset(3) }, 2)); + //unaligned access + assert!(allocator.check_shadow(unsafe { allocation.offset(3) }, 8)); + let allocation = unsafe { allocator.alloc(0x20, 0) }; + //access with unaligned parts at the beginning and end + assert!(allocator.check_shadow(unsafe { allocation.offset(10) }, 21)); + //invalid, unaligned access + assert!(!allocator.check_shadow(unsafe { allocation.offset(10) }, 29)); + let allocation = unsafe { allocator.alloc(4, 0) }; + assert!(!allocation.is_null()); + assert!(allocator.check_shadow(allocation, 1)); + assert!(allocator.check_shadow(allocation, 2)); + assert!(allocator.check_shadow(allocation, 3)); + assert!(allocator.check_shadow(allocation, 4)); + assert!(!allocator.check_shadow(allocation, 5)); + assert!(!allocator.check_shadow(allocation, 6)); + assert!(!allocator.check_shadow(allocation, 7)); + assert!(!allocator.check_shadow(allocation, 8)); + let allocation = unsafe { allocator.alloc(0xc, 0) }; + assert!(allocator.check_shadow(unsafe { allocation.offset(4) }, 8)); + let allocation = unsafe { allocator.alloc(0x3c, 0) }; + assert!(allocator.check_shadow(unsafe { allocation.offset(0x3a) }, 2)); +} diff --git a/libafl_frida/src/asan/asan_rt.rs b/libafl_frida/src/asan/asan_rt.rs index 3a69f72d6c..55300a7fd8 100644 --- a/libafl_frida/src/asan/asan_rt.rs +++ b/libafl_frida/src/asan/asan_rt.rs @@ -10,7 +10,12 @@ use core::{ fmt::{self, Debug, Formatter}, ptr::addr_of_mut, }; -use std::{ffi::c_void, num::NonZeroUsize, ptr::write_volatile, rc::Rc, sync::MutexGuard}; +use std::{ + ffi::{c_char, c_void}, + ptr::write_volatile, + 
rc::Rc, + sync::MutexGuard, +}; use backtrace::Backtrace; use dynasmrt::{dynasm, DynasmApi, DynasmLabelApi}; @@ -25,14 +30,7 @@ use frida_gum::{ use frida_gum_sys::Insn; use hashbrown::HashMap; use libafl_bolts::{cli::FuzzerOptions, AsSlice}; -// #[cfg(target_vendor = "apple")] -// use libc::RLIMIT_STACK; -use libc::{c_char, wchar_t}; -// #[cfg(target_vendor = "apple")] -// use libc::{getrlimit, rlimit}; -// #[cfg(all(unix, not(target_vendor = "apple")))] -// use libc::{getrlimit64, rlimit64}; -use nix::sys::mman::{mmap, MapFlags, ProtFlags}; +use libc::wchar_t; use rangemap::RangeMap; #[cfg(target_arch = "aarch64")] use yaxpeax_arch::Arch; @@ -63,11 +61,6 @@ extern "C" { fn tls_ptr() -> *const c_void; } -// #[cfg(target_vendor = "apple")] -// const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON; -// #[cfg(not(target_vendor = "apple"))] -// const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANONYMOUS; - /// The count of registers that need to be saved by the asan runtime /// sixteen general purpose registers are put in this order, rax, rbx, rcx, rdx, rbp, rsp, rsi, rdi, r8-r15, plus instrumented rip, accessed memory addr and true rip #[cfg(target_arch = "x86_64")] @@ -136,7 +129,9 @@ pub struct AsanRuntime { suppressed_addresses: Vec, skip_ranges: Vec, continue_on_error: bool, - shadow_check_func: Option bool>, + pub(crate) hooks: HashMap, + pub(crate) hooks_enabled: bool, + pc: Option, #[cfg(target_arch = "aarch64")] eh_frame: [u32; ASAN_EH_FRAME_DWORD_COUNT], @@ -168,9 +163,10 @@ impl FridaRuntime for AsanRuntime { AsanErrors::get_mut_blocking().set_continue_on_error(self.continue_on_error); + self.register_hooks(gum); + self.generate_instrumentation_blobs(); - self.generate_shadow_check_function(); self.unpoison_all_existing_memory(); self.module_map = Some(module_map.clone()); @@ -184,75 +180,6 @@ impl FridaRuntime for AsanRuntime { } })); - self.hook_functions(gum); - - /* unsafe { - let mem = self.allocator.alloc(0xac + 2, 8); - log::info!("Test0"); - /* - 
0x555555916ce9 je libafl_frida::asan_rt::AsanRuntime::init+14852 - 0x555555916cef mov rdi, r15 <0x555558392338> - */ - assert!((self.shadow_check_func.unwrap())( - (mem as usize) as *const c_void, - 0x00 - )); - log::info!("Test1"); - assert!((self.shadow_check_func.unwrap())( - (mem as usize) as *const c_void, - 0xac - )); - log::info!("Test2"); - assert!((self.shadow_check_func.unwrap())( - ((mem as usize) + 2) as *const c_void, - 0xac - )); - log::info!("Test3"); - assert!(!(self.shadow_check_func.unwrap())( - ((mem as usize) + 3) as *const c_void, - 0xac - )); - log::info!("Test4"); - assert!(!(self.shadow_check_func.unwrap())( - ((mem as isize) + -1) as *const c_void, - 0xac - )); - log::info!("Test5"); - assert!((self.shadow_check_func.unwrap())( - ((mem as usize) + 2 + 0xa4) as *const c_void, - 8 - )); - log::info!("Test6"); - assert!((self.shadow_check_func.unwrap())( - ((mem as usize) + 2 + 0xa6) as *const c_void, - 6 - )); - log::info!("Test7"); - assert!(!(self.shadow_check_func.unwrap())( - ((mem as usize) + 2 + 0xa8) as *const c_void, - 6 - )); - log::info!("Test8"); - assert!(!(self.shadow_check_func.unwrap())( - ((mem as usize) + 2 + 0xa8) as *const c_void, - 0xac - )); - log::info!("Test9"); - assert!((self.shadow_check_func.unwrap())( - ((mem as usize) + 4 + 0xa8) as *const c_void, - 0x1 - )); - log::info!("FIN"); - - for i in 0..0xad { - assert!((self.shadow_check_func.unwrap())( - ((mem as usize) + i) as *const c_void, - 0x01 - )); - } - // assert!((self.shadow_check_func.unwrap())(((mem2 as usize) + 8875) as *const c_void, 4)); - }*/ - self.register_thread(); } fn pre_exec( @@ -263,6 +190,7 @@ impl FridaRuntime for AsanRuntime { let slice = target_bytes.as_slice(); self.unpoison(slice.as_ptr() as usize, slice.len()); + self.enable_hooks(); Ok(()) } @@ -270,6 +198,7 @@ impl FridaRuntime for AsanRuntime { &mut self, input: &I, ) -> Result<(), libafl::Error> { + self.disable_hooks(); if self.check_for_leaks_enabled { self.check_for_leaks(); } @@ 
-322,12 +251,6 @@ impl AsanRuntime { &mut self.allocator } - /// The function that checks the shadow byte - #[must_use] - pub fn shadow_check_func(&self) -> &Option bool> { - &self.shadow_check_func - } - /// Check if the test leaked any memory and report it if so. pub fn check_for_leaks(&mut self) { self.allocator.check_for_leaks(); @@ -369,25 +292,35 @@ impl AsanRuntime { /// Unpoison all the memory that is currently mapped with read/write permissions. #[allow(clippy::unused_self)] - fn unpoison_all_existing_memory(&mut self) { + pub fn unpoison_all_existing_memory(&mut self) { self.allocator.unpoison_all_existing_memory(); } + /// Enable all function hooks + pub fn enable_hooks(&mut self) { + self.hooks_enabled = true; + } + /// Disable all function hooks + pub fn disable_hooks(&mut self) { + self.hooks_enabled = false; + } + /// Register the current thread with the runtime, implementing shadow memory for its stack and /// tls mappings. #[allow(clippy::unused_self)] #[cfg(not(target_os = "ios"))] pub fn register_thread(&mut self) { let (stack_start, stack_end) = Self::current_stack(); + let (tls_start, tls_end) = Self::current_tls(); + log::info!( + "registering thread with stack {stack_start:x}:{stack_end:x} and tls {tls_start:x}:{tls_end:x}" + ); self.allocator .map_shadow_for_region(stack_start, stack_end, true); - let (tls_start, tls_end) = Self::current_tls(); + #[cfg(unix)] self.allocator .map_shadow_for_region(tls_start, tls_end, true); - log::info!( - "registering thread with stack {stack_start:x}:{stack_end:x} and tls {tls_start:x}:{tls_end:x}" - ); } /// Register the current thread with the runtime, implementing shadow memory for its stack mapping. 
@@ -410,11 +343,11 @@ impl AsanRuntime { // rlim_max: 0, // }; // assert!(unsafe { getrlimit(RLIMIT_STACK, addr_of_mut!(stack_rlimit)) } == 0); - + // // stack_rlimit.rlim_cur as usize // } - - /// Get the maximum stack size for the current stack + // + // /// Get the maximum stack size for the current stack // #[must_use] // #[cfg(all(unix, not(target_vendor = "apple")))] // fn max_stack_size() -> usize { @@ -423,7 +356,7 @@ impl AsanRuntime { // rlim_max: 0, // }; // assert!(unsafe { getrlimit64(RLIMIT_STACK, addr_of_mut!(stack_rlimit)) } == 0); - + // // stack_rlimit.rlim_cur as usize // } @@ -468,32 +401,43 @@ impl AsanRuntime { unsafe { write_volatile(&mut stack_var, 0xfadbeef); } - - // let start = range_details.memory_range().base_address().0 as usize; - // let end = start + range_details.memory_range().size(); - // (start, end) - Self::range_for_address(stack_address) - - // let max_start = end - Self::max_stack_size(); - - // let flags = ANONYMOUS_FLAG | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE; - // #[cfg(not(target_vendor = "apple"))] - // let flags = flags | MapFlags::MAP_STACK; - - // if start != max_start { - // let mapping = unsafe { - // mmap( - // NonZeroUsize::new(max_start), - // NonZeroUsize::new(start - max_start).unwrap(), - // ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, - // flags, - // -1, - // 0, - // ) - // }; - // assert!(mapping.unwrap() as usize == max_start); - // } - // (max_start, end) + let mut range = None; + for area in mmap_rs::MemoryAreas::open(None).unwrap() { + let area_ref = area.as_ref().unwrap(); + if area_ref.start() <= stack_address && stack_address <= area_ref.end() { + range = Some((area_ref.end() - 1024 * 1024, area_ref.end())); + break; + } + } + if let Some((start, end)) = range { + // #[cfg(unix)] + // { + // let max_start = end - Self::max_stack_size(); + // + // let flags = ANONYMOUS_FLAG | MapFlags::MAP_FIXED | MapFlags::MAP_PRIVATE; + // #[cfg(not(target_vendor = "apple"))] + // let flags = flags | 
MapFlags::MAP_STACK; + // + // if start != max_start { + // let mapping = unsafe { + // mmap( + // NonZeroUsize::new(max_start), + // NonZeroUsize::new(start - max_start).unwrap(), + // ProtFlags::PROT_READ | ProtFlags::PROT_WRITE, + // flags, + // -1, + // 0, + // ) + // }; + // assert!(mapping.unwrap() as usize == max_start); + // } + // (max_start, end) + // } + // #[cfg(windows)] + (start, end) + } else { + panic!("Couldn't find stack mapping!"); + } } /// Determine the tls start, end for the currently running thread @@ -516,51 +460,63 @@ impl AsanRuntime { } /// Gets the current instruction pointer - #[cfg(target_arch = "aarch64")] #[must_use] #[inline] - pub fn pc() -> usize { - Interceptor::current_invocation().cpu_context().pc() as usize + pub fn pc(&self) -> usize { + if let Some(pc) = self.pc.as_ref() { + *pc + } else { + 0 + } } - /// Gets the current instruction pointer - #[cfg(target_arch = "x86_64")] - #[must_use] - #[inline] - pub fn pc() -> usize { - Interceptor::current_invocation().cpu_context().rip() as usize + /// Set the current program counter at hook time + pub fn set_pc(&mut self, pc: usize) { + self.pc = Some(pc); + } + /// Unset the current program counter + pub fn unset_pc(&mut self) { + self.pc = None; } - /// Hook all functions required for ASAN to function, replacing them with our own - /// implementations. - #[allow(clippy::items_after_statements)] + /// Register the required hooks #[allow(clippy::too_many_lines)] - fn hook_functions(&mut self, gum: &Gum) { + pub fn register_hooks(&mut self, gum: &Gum) { let mut interceptor = Interceptor::obtain(gum); macro_rules! hook_func { ($lib:expr, $name:ident, ($($param:ident : $param_type:ty),*), $return_type:ty) => { paste::paste! 
{ log::trace!("Hooking {}", stringify!($name)); - extern "C" { - fn $name($($param: $param_type),*) -> $return_type; - } + + let target_function = frida_gum::Module::find_export_by_name($lib, stringify!($name)).expect("Failed to find function"); + self.hooks.insert(stringify!($name).to_string(), target_function); + #[allow(non_snake_case)] unsafe extern "C" fn []($($param: $param_type),*) -> $return_type { let mut invocation = Interceptor::current_invocation(); let this = &mut *(invocation.replacement_data().unwrap().0 as *mut AsanRuntime); - let real_address = this.real_address_for_stalked(invocation.return_addr()); - if !this.suppressed_addresses.contains(&real_address) && this.module_map.as_ref().unwrap().find(real_address as u64).is_some() { - this.[]($($param),*) + // let real_address = this.real_address_for_stalked(invocation.return_addr()); + if this.hooks_enabled { + let previous_hook_state = this.hooks_enabled; + this.hooks_enabled = false; + let ret = this.[]($($param),*); + this.hooks_enabled = previous_hook_state; + ret } else { - $name($($param),*) + let original = std::mem::transmute::<*const c_void, extern "C" fn($($param: $param_type),*) -> $return_type>(this.hooks.get(&stringify!($name).to_string()).unwrap().0); + let previous_hook_state = this.hooks_enabled; + this.hooks_enabled = false; + let ret = (original)($($param),*); + this.hooks_enabled = previous_hook_state; + ret } } - interceptor.replace( - frida_gum::Module::find_export_by_name($lib, stringify!($name)).expect("Failed to find function"), + let _ = interceptor.replace( + target_function, NativePointer([] as *mut c_void), NativePointer(core::ptr::from_mut(self) as *mut c_void) - ).ok(); + ); } } } @@ -568,45 +524,307 @@ impl AsanRuntime { macro_rules! hook_func_with_check { ($lib:expr, $name:ident, ($($param:ident : $param_type:ty),*), $return_type:ty) => { paste::paste! 
{ - extern "C" { - fn $name($($param: $param_type),*) -> $return_type; - } + log::trace!("Hooking {}", stringify!($name)); + let target_function = frida_gum::Module::find_export_by_name($lib, stringify!($name)).expect("Failed to find function"); + self.hooks.insert(stringify!($name).to_string(), target_function); + #[allow(non_snake_case)] unsafe extern "C" fn []($($param: $param_type),*) -> $return_type { let mut invocation = Interceptor::current_invocation(); let this = &mut *(invocation.replacement_data().unwrap().0 as *mut AsanRuntime); - if this.[]($($param),*) { - this.[]($($param),*) + if this.hooks_enabled && this.[]($($param),*) { + let previous_hook_state = this.hooks_enabled; + this.hooks_enabled = false; + let ret = this.[]($($param),*); + this.hooks_enabled = previous_hook_state; + ret } else { - $name($($param),*) + let original = std::mem::transmute::<*const c_void, extern "C" fn($($param: $param_type),*) -> $return_type>(this.hooks.get(&stringify!($name).to_string()).unwrap().0); + let previous_hook_state = this.hooks_enabled; + this.hooks_enabled = false; + let ret = (original)($($param),*); + this.hooks_enabled = previous_hook_state; + ret } } - interceptor.replace( - frida_gum::Module::find_export_by_name($lib, stringify!($name)).expect("Failed to find function"), + let _ = interceptor.replace( + target_function, NativePointer([] as *mut c_void), NativePointer(core::ptr::from_mut(self) as *mut c_void) - ).ok(); + ); } } } - // Hook the memory allocator functions + + #[cfg(not(windows))] hook_func!(None, malloc, (size: usize), *mut c_void); + #[cfg(not(windows))] hook_func!(None, calloc, (nmemb: usize, size: usize), *mut c_void); + #[cfg(not(windows))] hook_func!(None, realloc, (ptr: *mut c_void, size: usize), *mut c_void); - hook_func_with_check!(None, free, (ptr: *mut c_void), ()); - #[cfg(not(target_vendor = "apple"))] + #[cfg(not(windows))] + hook_func_with_check!(None, free, (ptr: *mut c_void), usize); + #[cfg(not(any(target_vendor = "apple", 
windows)))] hook_func!(None, memalign, (size: usize, alignment: usize), *mut c_void); + #[cfg(not(windows))] hook_func!( None, posix_memalign, (pptr: *mut *mut c_void, size: usize, alignment: usize), i32 ); - #[cfg(not(target_vendor = "apple"))] + #[cfg(not(any(target_vendor = "apple", windows)))] hook_func!(None, malloc_usable_size, (ptr: *mut c_void), usize); + // // #[cfg(windows)] + // hook_priv_func!( + // "c:\\windows\\system32\\ntdll.dll", + // LdrpCallInitRoutine, + // (base_address: *const c_void, reason: usize, context: usize, entry_point: usize), + // usize + // ); + // #[cfg(windows)] + // hook_func!( + // None, + // LoadLibraryExW, + // (path: *const c_void, file: usize, flags: i32), + // usize + // ); + // #[cfg(windows)] + // hook_func!( + // None, + // CreateThread, + // (thread_attributes: *const c_void, stack_size: usize, start_address: *const c_void, parameter: *const c_void, creation_flags: i32, thread_id: *mut i32), + // usize + // ); + // #[cfg(windows)] + // hook_func!( + // None, + // CreateFileMappingW, + // (file: usize, file_mapping_attributes: *const c_void, protect: i32, maximum_size_high: u32, maximum_size_low: u32, name: *const c_void), + // usize + // ); + + #[cfg(windows)] + for libname in [ + "ntdll", + "win32u", + "ucrtbase", + "kernelbase", + "kernel32", + "msvcrt", + "api-ms-win-crt-private-l1-1-0", + "api-ms-win-crt-private-l1-1-0.dll", + "api-ms-win-core-heap-l1-1-0", + "api-ms-win-core-heap-l2-1-0", + "api-ms-win-core-heap-obsolete-l1-1-0", + // "vcruntime140", + ] { + log::info!("Hooking allocator functions in {}", libname); + for export in Module::enumerate_exports(libname) { + // log::trace!("- {}", export.name); + match &export.name[..] 
{ + "NtGdiCreateCompatibleDC" => { + hook_func!(Some(libname), NtGdiCreateCompatibleDC, (hdc: *const c_void), *mut c_void); + } + "RtlCreateHeap" => { + hook_func!(Some(libname), RtlCreateHeap, (flags: u32, heap_base: *const c_void, reserve_size: usize, commit_size: usize, lock: *const c_void, parameters: *const c_void), *mut c_void); + } + "RtlDestroyHeap" => { + hook_func!(Some(libname), RtlDestroyHeap, (handle: *const c_void), *mut c_void); + } + "HeapAlloc" => { + hook_func!(Some(libname), HeapAlloc, (handle: *mut c_void, flags: u32, bytes: usize), *mut c_void); + } + "RtlAllocateHeap" => { + hook_func!(Some(libname), RtlAllocateHeap, (handle: *mut c_void, flags: u32, bytes: usize), *mut c_void); + } + "HeapFree" => { + hook_func_with_check!(Some(libname), HeapFree, (handle: *mut c_void, flags: u32, mem: *mut c_void), bool); + } + "RtlFreeHeap" => { + hook_func_with_check!(Some(libname), RtlFreeHeap, (handle: *mut c_void, flags: u32, mem: *mut c_void), usize); + } + "HeapSize" => { + hook_func_with_check!(Some(libname), HeapSize, (handle: *mut c_void, flags: u32, mem: *mut c_void), usize); + } + "RtlSizeHeap" => { + hook_func_with_check!(Some(libname), RtlSizeHeap , (handle: *mut c_void, flags: u32, mem: *mut c_void), usize); + } + "RtlReAllocateHeap" => { + hook_func!( + Some(libname), + RtlReAllocateHeap, + ( + handle: *mut c_void, + flags: u32, + ptr: *mut c_void, + size: usize + ), + *mut c_void + ); + } + "HeapReAlloc" => { + hook_func!( + Some(libname), + HeapReAlloc, + ( + handle: *mut c_void, + flags: u32, + ptr: *mut c_void, + size: usize + ), + *mut c_void + ); + } + "LocalAlloc" => { + hook_func!(Some(libname), LocalAlloc, (flags: u32, size: usize), *mut c_void); + } + "LocalReAlloc" => { + hook_func!(Some(libname), LocalReAlloc, (mem: *mut c_void, size: usize, flags: u32), *mut c_void); + } + "LocalHandle" => { + hook_func_with_check!(Some(libname), LocalHandle, (mem: *mut c_void), *mut c_void); + } + "LocalLock" => { + 
hook_func_with_check!(Some(libname), LocalLock, (mem: *mut c_void), *mut c_void); + } + "LocalUnlock" => { + hook_func_with_check!(Some(libname), LocalUnlock, (mem: *mut c_void), bool); + } + "LocalSize" => { + hook_func_with_check!(Some(libname), LocalSize, (mem: *mut c_void),usize); + } + "LocalFree" => { + hook_func_with_check!(Some(libname), LocalFree, (mem: *mut c_void), *mut c_void); + } + "LocalFlags" => { + hook_func_with_check!(Some(libname), LocalFlags, (mem: *mut c_void),u32); + } + "GlobalAlloc" => { + hook_func!(Some(libname), GlobalAlloc, (flags: u32, size: usize), *mut c_void); + } + "GlobalReAlloc" => { + hook_func!(Some(libname), GlobalReAlloc, (mem: *mut c_void, flags: u32, size: usize), *mut c_void); + } + "GlobalHandle" => { + hook_func_with_check!(Some(libname), GlobalHandle, (mem: *mut c_void), *mut c_void); + } + "GlobalLock" => { + hook_func_with_check!(Some(libname), GlobalLock, (mem: *mut c_void), *mut c_void); + } + "GlobalUnlock" => { + hook_func_with_check!(Some(libname), GlobalUnlock, (mem: *mut c_void), bool); + } + "GlobalSize" => { + hook_func_with_check!(Some(libname), GlobalSize, (mem: *mut c_void),usize); + } + "GlobalFree" => { + hook_func_with_check!(Some(libname), GlobalFree, (mem: *mut c_void), *mut c_void); + } + "GlobalFlags" => { + hook_func_with_check!(Some(libname), GlobalFlags, (mem: *mut c_void),u32); + } + "memmove" => { + hook_func!( + Some(libname), + memmove, + (dest: *mut c_void, src: *const c_void, n: usize), + *mut c_void + ); + } + "memcpy" => { + hook_func!( + Some(libname), + memcpy, + (dest: *mut c_void, src: *const c_void, n: usize), + *mut c_void + ); + } + "malloc" => { + hook_func!(Some(libname), malloc, (size: usize), *mut c_void); + } + "_o_malloc" | "o_malloc" => { + hook_func!(Some(libname), _o_malloc, (size: usize), *mut c_void); + } + "calloc" => { + hook_func!(Some(libname), calloc, (nmemb: usize, size: usize), *mut c_void); + } + "_o_calloc" | "o_calloc" => { + hook_func!(Some(libname), 
_o_calloc, (nmemb: usize, size: usize), *mut c_void); + } + "realloc" => { + hook_func!(Some(libname), realloc, (ptr: *mut c_void, size: usize), *mut c_void); + } + "_o_realloc" | "o_realloc" => { + hook_func!(Some(libname), _o_realloc, (ptr: *mut c_void, size: usize), *mut c_void); + } + "free" => { + hook_func_with_check!(Some(libname), free, (ptr: *mut c_void), usize); + } + "_o_free" | "o_free" => { + hook_func_with_check!(Some(libname), _o_free, (ptr: *mut c_void), usize); + } + "_write" => { + hook_func!( + Some(libname), + _write, + (fd: i32, buf: *const c_void, count: usize), + usize + ); + } + "_read" => { + hook_func!( + Some(libname), + _read, + (fd: i32, buf: *mut c_void, count: usize), + usize + ); + } + "MapViewOfFile" => { + hook_func!( + Some(libname), + MapViewOfFile, + (handle: *const c_void, desired_access: u32, file_offset_high: u32, file_offset_low: u32, size: usize), + *const c_void + ); + } + "LoadLibraryExW" => { + hook_func!( + Some(libname), + LoadLibraryExW, + (path: *const c_void, file: usize, flags: i32), + usize + ); + } + "LdrLoadDll" => { + hook_func!( + Some(libname), + LdrLoadDll, + (search_path: *const c_void, charecteristics: *const u32, dll_name: *const c_void, base_address: *mut *const c_void), + usize + ); + } + _ => (), + } + } + } + + #[cfg(target_os = "linux")] + let cpp_libs = [ + "libc++.so", + "libc++.so.1", + "libc++abi.so.1", + "libc++_shared.so", + "libstdc++.so", + "libstdc++.so.6", + ]; + + #[cfg(target_vendor = "apple")] + let cpp_libs = ["libc++.1.dylib", "libc++abi.dylib", "libsystem_c.dylib"]; - for libname in ["libc++.so", "libc++.so.1", "libc++_shared.so"] { + #[cfg(not(windows))] + for libname in cpp_libs { log::info!("Hooking c++ functions in {}", libname); for export in Module::enumerate_exports(libname) { match &export.name[..] 
{ @@ -665,17 +883,17 @@ impl AsanRuntime { ); } "_ZdaPv" => { - hook_func!(Some(libname), _ZdaPv, (ptr: *mut c_void), ()); + hook_func!(Some(libname), _ZdaPv, (ptr: *mut c_void), usize); } "_ZdaPvm" => { - hook_func!(Some(libname), _ZdaPvm, (ptr: *mut c_void, _ulong: u64), ()); + hook_func!(Some(libname), _ZdaPvm, (ptr: *mut c_void, _ulong: u64), usize); } "_ZdaPvmSt11align_val_t" => { hook_func!( Some(libname), _ZdaPvmSt11align_val_t, (ptr: *mut c_void, _ulong: u64, _alignment: usize), - () + usize ); } "_ZdaPvRKSt9nothrow_t" => { @@ -683,7 +901,7 @@ impl AsanRuntime { Some(libname), _ZdaPvRKSt9nothrow_t, (ptr: *mut c_void, _nothrow: *const c_void), - () + usize ); } "_ZdaPvSt11align_val_t" => { @@ -691,7 +909,7 @@ impl AsanRuntime { Some(libname), _ZdaPvSt11align_val_t, (ptr: *mut c_void, _alignment: usize), - () + usize ); } "_ZdaPvSt11align_val_tRKSt9nothrow_t" => { @@ -699,21 +917,21 @@ impl AsanRuntime { Some(libname), _ZdaPvSt11align_val_tRKSt9nothrow_t, (ptr: *mut c_void, _alignment: usize, _nothrow: *const c_void), - () + usize ); } "_ZdlPv" => { - hook_func!(Some(libname), _ZdlPv, (ptr: *mut c_void), ()); + hook_func!(Some(libname), _ZdlPv, (ptr: *mut c_void), usize); } "_ZdlPvm" => { - hook_func!(Some(libname), _ZdlPvm, (ptr: *mut c_void, _ulong: u64), ()); + hook_func!(Some(libname), _ZdlPvm, (ptr: *mut c_void, _ulong: u64), usize); } "_ZdlPvmSt11align_val_t" => { hook_func!( Some(libname), _ZdlPvmSt11align_val_t, (ptr: *mut c_void, _ulong: u64, _alignment: usize), - () + usize ); } "_ZdlPvRKSt9nothrow_t" => { @@ -721,7 +939,7 @@ impl AsanRuntime { Some(libname), _ZdlPvRKSt9nothrow_t, (ptr: *mut c_void, _nothrow: *const c_void), - () + usize ); } "_ZdlPvSt11align_val_t" => { @@ -729,7 +947,7 @@ impl AsanRuntime { Some(libname), _ZdlPvSt11align_val_t, (ptr: *mut c_void, _alignment: usize), - () + usize ); } "_ZdlPvSt11align_val_tRKSt9nothrow_t" => { @@ -737,14 +955,14 @@ impl AsanRuntime { Some(libname), _ZdlPvSt11align_val_tRKSt9nothrow_t, (ptr: *mut 
c_void, _alignment: usize, _nothrow: *const c_void), - () + usize ); } _ => {} } } } - log::info!("Hooking libc functions"); + #[cfg(not(windows))] hook_func!( None, mmap, @@ -758,15 +976,18 @@ impl AsanRuntime { ), *mut c_void ); + #[cfg(not(windows))] hook_func!(None, munmap, (addr: *const c_void, length: usize), i32); // Hook libc functions which may access allocated memory + #[cfg(not(windows))] hook_func!( None, write, (fd: i32, buf: *const c_void, count: usize), usize ); + #[cfg(not(windows))] hook_func!(None, read, (fd: i32, buf: *mut c_void, count: usize), usize); hook_func!( None, @@ -786,19 +1007,20 @@ impl AsanRuntime { (dest: *mut c_void, src: *const c_void, n: usize), *mut c_void ); - #[cfg(not(target_vendor = "apple"))] + #[cfg(not(any(target_vendor = "apple", windows)))] hook_func!( None, mempcpy, (dest: *mut c_void, src: *const c_void, n: usize), *mut c_void ); - hook_func!( - None, - memmove, - (dest: *mut c_void, src: *const c_void, n: usize), - *mut c_void - ); + // #[cfg(not(windows))] + // hook_func!( + // None, + // memmove, + // (dest: *mut c_void, src: *const c_void, n: usize), + // *mut c_void + // ); hook_func!( None, memset, @@ -811,13 +1033,14 @@ impl AsanRuntime { (s: *mut c_void, c: i32, n: usize), *mut c_void ); - #[cfg(not(target_vendor = "apple"))] + #[cfg(not(any(target_vendor = "apple", windows)))] hook_func!( None, memrchr, (s: *mut c_void, c: i32, n: usize), *mut c_void ); + #[cfg(not(windows))] hook_func!( None, memmem, @@ -829,25 +1052,27 @@ impl AsanRuntime { ), *mut c_void ); - #[cfg(not(target_os = "android"))] - hook_func!(None, bzero, (s: *mut c_void, n: usize), ()); - #[cfg(not(any(target_os = "android", target_vendor = "apple")))] - hook_func!(None, explicit_bzero, (s: *mut c_void, n: usize), ()); - #[cfg(not(target_os = "android"))] - hook_func!( - None, - bcmp, - (s1: *const c_void, s2: *const c_void, n: usize), - i32 - ); + #[cfg(not(any(target_os = "android", windows)))] + hook_func!(None, bzero, (s: *mut c_void, n: 
usize), usize); + #[cfg(not(any(target_os = "android", target_vendor = "apple", windows)))] + hook_func!(None, explicit_bzero, (s: *mut c_void, n: usize),usize); + // #[cfg(not(any(target_os = "android", windows)))] + // hook_func!( + // None, + // bcmp, + // (s1: *const c_void, s2: *const c_void, n: usize), + // i32 + // ); hook_func!(None, strchr, (s: *mut c_char, c: i32), *mut c_char); hook_func!(None, strrchr, (s: *mut c_char, c: i32), *mut c_char); + #[cfg(not(windows))] hook_func!( None, strcasecmp, (s1: *const c_char, s2: *const c_char), i32 ); + #[cfg(not(windows))] hook_func!( None, strncasecmp, @@ -879,13 +1104,17 @@ impl AsanRuntime { (dest: *mut c_char, src: *const c_char, n: usize), *mut c_char ); + #[cfg(not(windows))] hook_func!( None, stpcpy, (dest: *mut c_char, src: *const c_char), *mut c_char ); + #[cfg(not(windows))] hook_func!(None, strdup, (s: *const c_char), *mut c_char); + #[cfg(windows)] + hook_func!(None, _strdup, (s: *const c_char), *mut c_char); hook_func!(None, strlen, (s: *const c_char), usize); hook_func!(None, strnlen, (s: *const c_char, n: usize), usize); hook_func!( @@ -894,6 +1123,7 @@ impl AsanRuntime { (haystack: *const c_char, needle: *const c_char), *mut c_char ); + #[cfg(not(windows))] hook_func!( None, strcasestr, @@ -937,7 +1167,8 @@ impl AsanRuntime { #[cfg(target_arch = "x86_64")] #[allow(clippy::cast_sign_loss)] #[allow(clippy::too_many_lines)] - extern "C" fn handle_trap(&mut self) { + extern "system" fn handle_trap(&mut self) { + self.hooks_enabled = false; self.dump_registers(); let fault_address = self.regs[17]; @@ -1101,7 +1332,7 @@ impl AsanRuntime { #[cfg(target_arch = "aarch64")] #[allow(clippy::cast_sign_loss)] // for displacement #[allow(clippy::too_many_lines)] - extern "C" fn handle_trap(&mut self) { + extern "system" fn handle_trap(&mut self) { let mut actual_pc = self.regs[31]; actual_pc = match self.stalked_addresses.get(&actual_pc) { //get the pc associated with the trapped insn @@ -1283,539 +1514,141 @@ 
impl AsanRuntime { log::info!("instrumented rip: {:x}", self.regs[16]); log::info!("fault address: {:x}", self.regs[17]); log::info!("actual rip: {:x}", self.regs[18]); + log::info!("stack: "); + for i in 0..32 { + log::info!("{:x}", unsafe { + ((self.regs[5] + i * 8) as *const u64).read() + }); + } } - // https://godbolt.org/z/oajhcP5sv + // https://godbolt.org/z/ah8vG8sWo /* #include #include - uint8_t shadow_bit = 44; + uint8_t shadow_bit = 8; + uint8_t bit = 3; + uint64_t result = 0; + void handle_trap(uint64_t true_rip); + uint64_t generate_shadow_check_blob(uint64_t start, uint64_t true_rip){ + uint64_t shadow_base = (1ULL << shadow_bit); + if (shadow_base * 3 > start || start >= shadow_base *4) + return 0; - uint64_t generate_shadow_check_function(uint64_t start, uint64_t size){ - // calculate the shadow address uint64_t addr = 0; addr = addr + (start >> 3); uint64_t mask = (1ULL << (shadow_bit + 1)) - 1; + addr = addr & mask; addr = addr + (1ULL << shadow_bit); - if(size == 0){ - // goto return_success - return 1; - } - else{ - // check if the ptr is not aligned to 8 bytes - uint8_t remainder = start & 0b111; - if(remainder != 0){ - // we need to test the high bits from the first shadow byte - uint8_t shift; - if(size < 8){ - shift = size; - } - else{ - shift = 8 - remainder; - } - // goto check_bits - uint8_t mask = (1 << shift) - 1; - - // bitwise reverse for amd64 :< - // https://gist.github.com/yantonov/4359090 - // we need 16bit number here, (not 8bit) - uint16_t val = *(uint16_t *)addr; - val = (val & 0xff00) >> 8 | (val & 0x00ff) << 8; - val = (val & 0xf0f0) >> 4 | (val & 0x0f0f) << 4; - val = (val & 0xcccc) >> 2 | (val & 0x3333) << 2; - val = (val & 0xaaaa) >> 1 | (val & 0x5555) << 1; - val = (val >> 8) | (val << 8); // swap the byte - val = (val >> remainder); - if((val & mask) != mask){ - // goto return failure - return 0; - } - - size = size - shift; - addr += 1; - } - - // no_start_offset - uint64_t num_shadow_bytes = size >> 3; - uint64_t mask 
= -1; - - while(true){ - if(num_shadow_bytes < 8){ - // goto less_than_8_shadow_bytes_remaining - break; - } - else{ - uint64_t val = *(uint64_t *)addr; - addr += 8; - if(val != mask){ - // goto return failure - return 0; - } - num_shadow_bytes -= 8; - size -= 64; - } - } - - while(true){ - if(num_shadow_bytes < 1){ - // goto check_trailing_bits - break; - } - else{ - uint8_t val = *(uint8_t *)addr; - addr += 1; - if(val != 0xff){ - // goto return failure - return 0; - } - num_shadow_bytes -= 1; - size -= 8; - } - } - - if(size == 0){ - // goto return success - return 1; - } - - uint8_t mask2 = ((1 << (size & 0b111)) - 1); - uint8_t val = *(uint8_t *)addr; - val = (val & 0xf0) >> 4 | (val & 0x0f) << 4; - val = (val & 0xff) >> 2 | (val & 0x33) << 2; - val = (val & 0xaa) >> 1 | (val & 0x55) << 1; + uint8_t remainder = start & 0b111; + uint16_t val = *(uint16_t *)addr; + val = (val >> remainder); - if((val & mask2) != mask2){ - // goto return failure - return 0; - } - return 1; + uint8_t mask2 = (1 << bit) - 1; + if((val & mask2) != mask2){ + // failure + handle_trap(true_rip); } - } - */ - #[cfg(target_arch = "x86_64")] - #[allow(clippy::unused_self, clippy::identity_op)] - #[allow(clippy::too_many_lines)] - fn generate_shadow_check_function(&mut self) { - use std::fs::File; + return 0; - let shadow_bit = self.allocator.shadow_bit(); - let mut ops = dynasmrt::VecAssembler::::new(0); - - // Rdi start, Rsi size - dynasm!(ops - ; .arch x64 - ; mov cl, BYTE shadow_bit as i8 - ; mov r10, -2 - ; shl r10, cl - ; mov eax, 1 - ; mov edx, 1 - ; shl rdx, cl - ; test rsi, rsi - ; je >LBB0_15 - ; mov rcx, rdi - ; shr rcx, 3 - ; not r10 - ; and r10, rcx - ; add r10, rdx - ; and edi, 7 - ; je >LBB0_4 - ; mov cl, 8 - ; sub cl, dil - ; cmp rsi, 8 - ; movzx ecx, cl - ; mov r8d, esi - ; cmovae r8d, ecx - ; mov r9d, -1 - ; mov ecx, r8d - ; shl r9d, cl - ; movzx ecx, WORD [r10] - ; rol cx, 8 - ; mov edx, ecx - ; shr edx, 4 - ; and edx, 3855 - ; shl ecx, 4 - ; and ecx, -3856 - ; or ecx, 
edx - ; mov edx, ecx - ; shr edx, 2 - ; and edx, 13107 - ; and ecx, -3277 - ; lea ecx, [rdx + 4*rcx] - ; mov edx, ecx - ; shr edx, 1 - ; and edx, 21845 - ; and ecx, -10923 - ; lea ecx, [rdx + 2*rcx] - ; rol cx, 8 - ; movzx edx, cx - ; mov ecx, edi - ; shr edx, cl - ; not r9d - ; movzx ecx, r9b - ; and edx, ecx - ; cmp edx, ecx - ; jne >LBB0_11 - ; movzx ecx, r8b - ; sub rsi, rcx - ; add r10, 1 - ;LBB0_4: - ; mov r8, rsi - ; shr r8, 3 - ; mov r9, r8 - ; and r9, -8 - ; mov edi, r8d - ; and edi, 7 - ; add r9, r10 - ; and esi, 63 - ; mov rdx, r8 - ; mov rcx, r10 - ;LBB0_5: - ; cmp rdx, 7 - ; jbe >LBB0_8 - ; add rdx, -8 - ; cmp QWORD [rcx], -1 - ; lea rcx, [rcx + 8] - ; je LBB0_11 - ;LBB0_8: - ; lea rcx, [8*rdi] - ; sub rsi, rcx - ;LBB0_9: - ; test rdi, rdi - ; je >LBB0_13 - ; add rdi, -1 - ; cmp BYTE [r9], -1 - ; lea r9, [r9 + 1] - ; je LBB0_15 - ; and sil, 7 - ; mov dl, -1 - ; mov ecx, esi - ; shl dl, cl - ; not dl - ; mov cl, BYTE [r8 + r10] - ; rol cl, 4 - ; mov eax, ecx - ; shr al, 2 - ; shl cl, 2 - ; and cl, -52 - ; or cl, al - ; mov eax, ecx - ; shr al, 1 - ; and al, 85 - ; add cl, cl - ; and cl, -86 - ; or cl, al - ; and cl, dl - ; xor eax, eax - ; cmp cl, dl - ; sete al - ;LBB0_15: - ; ret - ); - let blob = ops.finalize().unwrap(); - unsafe { - let mapping = mmap::( - None, - NonZeroUsize::new_unchecked(0x1000), - ProtFlags::all(), - MapFlags::MAP_ANON | MapFlags::MAP_PRIVATE, - None, - 0, - ) - .unwrap(); - blob.as_ptr() - .copy_to_nonoverlapping(mapping as *mut u8, blob.len()); - self.shadow_check_func = Some(std::mem::transmute::< - *mut u8, - extern "C" fn(*const c_void, usize) -> bool, - >(mapping as *mut u8)); - } } + */ - #[cfg(target_arch = "aarch64")] - // identity_op appears to be a false positive in ubfx - #[allow(clippy::unused_self, clippy::identity_op, clippy::too_many_lines)] - fn generate_shadow_check_function(&mut self) { - use std::fs::File; - - let shadow_bit = self.allocator.shadow_bit(); - let mut ops = dynasmrt::VecAssembler::::new(0); - 
dynasm!(ops - ; .arch aarch64 + /* - // calculate the shadow address - ; mov x5, #0 - // ; add x5, xzr, x5, lsl #shadow_bit - ; add x5, x5, x0, lsr #3 - ; ubfx x5, x5, #0, #(shadow_bit + 1) - ; mov x6, #1 - ; add x5, x5, x6, lsl #shadow_bit - - ; cmp x1, #0 - ; b.eq >return_success - // check if the ptr is not aligned to 8 bytes - ; ands x6, x0, #7 - ; b.eq >no_start_offset - - // we need to test the high bits from the first shadow byte - ; ldrh w7, [x5, #0] - ; rev16 w7, w7 - ; rbit w7, w7 - ; lsr x7, x7, #16 - ; lsr x7, x7, x6 - - ; cmp x1, #8 - ; b.lt >dont_fill_to_8 - ; mov x2, #8 - ; sub x6, x2, x6 - ; b >check_bits - ; dont_fill_to_8: - ; mov x6, x1 - ; check_bits: - ; mov x2, #1 - ; lsl x2, x2, x6 - ; sub x4, x2, #1 - - // if shadow_bits & size_to_test != size_to_test: fail - ; and x7, x7, x4 - ; cmp x7, x4 - ; b.ne >return_failure - - // size -= size_to_test - ; sub x1, x1, x6 - // shadow_addr += 1 (we consumed the initial byte in the above test) - ; add x5, x5, 1 - - ; no_start_offset: - // num_shadow_bytes = size / 8 - ; lsr x4, x1, #3 - ; eor x3, x3, x3 - ; sub x3, x3, #1 - - // if num_shadow_bytes < 8; then goto check_bytes; else check_8_shadow_bytes - ; check_8_shadow_bytes: - ; cmp x4, #0x8 - ; b.lt >less_than_8_shadow_bytes_remaining - ; ldr x7, [x5], #8 - ; cmp x7, x3 - ; b.ne >return_failure - ; sub x4, x4, #8 - ; sub x1, x1, #64 - ; b check_trailing_bits - ; ldrb w7, [x5], #1 - ; cmp w7, #0xff - ; b.ne >return_failure - ; sub x4, x4, #1 - ; sub x1, x1, #8 - ; b return_success - - ; and x4, x1, #7 - ; mov x2, #1 - ; lsl x2, x2, x4 - ; sub x4, x2, #1 - - ; ldrh w7, [x5, #0] - ; rev16 w7, w7 - ; rbit w7, w7 - ; lsr x7, x7, #16 - ; and x7, x7, x4 - ; cmp x7, x4 - ; b.ne >return_failure - - ; return_success: - ; mov x0, #1 - ; b >prologue - - ; return_failure: - ; mov x0, #0 - - - ; prologue: - ; ret - ); + FRIDA ASAN IMPLEMENTATION DETAILS - let blob = ops.finalize().unwrap(); + The format of Frida's ASAN is signficantly different from LLVM ASAN. 
- // apple aarch64 requires MAP_JIT to allocates WX pages - #[cfg(target_vendor = "apple")] - let map_flags = MapFlags::MAP_ANON | MapFlags::MAP_PRIVATE | MapFlags::MAP_JIT; - #[cfg(not(target_vendor = "apple"))] - let map_flags = MapFlags::MAP_ANON | MapFlags::MAP_PRIVATE; + In Frida ASAN, we attempt to find the lowest possible bit such that there is no mapping with that bit. That is to say, for some bit x, there is no mapping greater than + 1 << x. This is our shadow base and is similar to Ultra compact shadow in LLVM ASAN. Unlike ASAN where 0 represents a poisoned byte and 1 represents an unpoisoned byte, in Frida-ASAN - unsafe { - let mapping = mmap::( - None, - NonZeroUsize::try_from(0x1000).unwrap(), - ProtFlags::all(), - map_flags, - None, - 0, - ) - .unwrap(); - - // on apple aarch64, WX pages can't be both writable and executable at the same time. - // pthread_jit_write_protect_np flips them from executable (1) to writable (0) - #[cfg(all(target_vendor = "apple", target_arch = "aarch64"))] - { - libc::pthread_jit_write_protect_np(0); - } + The reasoning for this is that new pages are zeroed, so, by default, every qword is poisoned and we must explicitly unpoison any byte. - blob.as_ptr() - .copy_to_nonoverlapping(mapping as *mut u8, blob.len()); + Much like LLVM ASAN, shadow bytes are qword based. This is to say that each shadow byte maps to one qword. The shadow calculation is as follows: + (1ULL << shadow_bit) | (address >> 3) - #[cfg(all(target_vendor = "apple", target_arch = "aarch64"))] - { - libc::pthread_jit_write_protect_np(1); - } - self.shadow_check_func = Some(std::mem::transmute::< - *mut u8, - extern "C" fn(*const c_void, usize) -> bool, - >(mapping as *mut u8)); - } - } + The format of a shadow bit is a bitmask. Each bit represents if a byte in the qword is valid starting from the first bit. So, something like 0b11100000 indicates that only the first 3 bytes in the associated qword are valid. 
- // https://godbolt.org/z/ah8vG8sWo - /* - #include - #include - uint8_t shadow_bit = 8; - uint8_t bit = 3; - uint64_t generate_shadow_check_blob(uint64_t start){ - uint64_t addr = 0; - addr = addr + (start >> 3); - uint64_t mask = (1ULL << (shadow_bit + 1)) - 1; - addr = addr & mask; - addr = addr + (1ULL << shadow_bit); - - uint8_t remainder = start & 0b111; - uint16_t val = *(uint16_t *)addr; - val = (val & 0xff00) >> 8 | (val & 0x00ff) << 8; - val = (val & 0xf0f0) >> 4 | (val & 0x0f0f) << 4; - val = (val & 0xcccc) >> 2 | (val & 0x3333) << 2; - val = (val & 0xaaaa) >> 1 | (val & 0x5555) << 1; - val = (val >> 8) | (val << 8); // swap the byte - val = (val >> remainder); - - uint8_t mask2 = (1 << bit) - 1; - if((val & mask2) == mask2){ - // success - return 0; - } - else{ - // failure - return 1; - } - } */ #[cfg(target_arch = "x86_64")] #[allow(clippy::unused_self)] - fn generate_shadow_check_blob(&mut self, bit: u32) -> Box<[u8]> { + fn generate_shadow_check_blob(&mut self, size: u32) -> Box<[u8]> { let shadow_bit = self.allocator.shadow_bit(); - // Rcx, Rax, Rdi, Rdx, Rsi are used, so we save them in emit_shadow_check + // Rcx, Rax, Rdi, Rdx, Rsi, R8 are used, so we save them in emit_shadow_check + //at this point RDI contains the + let mask_shift = 32 - size; macro_rules! 
shadow_check{ ($ops:ident, $bit:expr) => {dynasm!($ops ; .arch x64 - ; mov cl, BYTE shadow_bit as i8 - ; mov rax, -2 - ; shl rax, cl - ; mov rdx, rdi - ; shr rdx, 3 - ; not rax - ; and rax, rdx - ; mov edx, 1 - ; shl rdx, cl - ; movzx eax, WORD [rax + rdx] - ; rol ax, 8 - ; mov ecx, eax - ; shr ecx, 4 - ; and ecx, 3855 - ; shl eax, 4 - ; and eax, -3856 - ; or eax, ecx - ; mov ecx, eax - ; shr ecx, 2 - ; and ecx, 13107 - ; and eax, -3277 - ; lea eax, [rcx + 4*rax] - ; mov ecx, eax - ; shr ecx, 1 - ; and ecx, 21845 - ; and eax, -10923 - ; lea eax, [rcx + 2*rax] - ; rol ax, 8 - ; movzx edx, ax - ; and dil, 7 - ; mov ecx, edi - ; shr edx, cl - ; mov cl, BYTE bit as i8 - ; mov eax, -1 - ; shl eax, cl - ; not eax - ; movzx ecx, al - ; and edx, ecx - ; xor eax, eax - ; cmp edx, ecx - ; je >done - ; lea rsi, [>done] // leap 10 bytes forward - ; nop // jmp takes 10 bytes at most so we want to allocate 10 bytes buffer (?) - ; nop - ; nop - ; nop - ; nop - ; nop - ; nop - ; nop - ; nop - ; nop + // ; int3 + ; mov rdx, 1 + ; shl rdx, shadow_bit as i8 //rcx now contains the mask + ; mov rcx, rdi //copy address into rdx + ; and rcx, 7 //rsi now contains the offset for unaligned accesses + ; shr rdi, 3 //rdi now contains the shadow byte offset + ; add rdi, rdx //add rdx and rdi to get the address of the shadow byte. rdi now contains the shadow address + ; mov edx, [rdi] //load 4 shadow bytes. We load 4 just in case of an unaligned access + ; bswap edx //bswap to get it into an acceptable form + ; shl edx, cl //this shifts by the unaligned access offset. why does x86 require cl... + ; mov edi, -1 //fill edi with all 1s + ; shl edi, mask_shift as i8 //edi now contains mask. 
this shl functionally creates a bitmask with the top `size` bits as 1s + ; and edx, edi //and it to see if the top bits are enabled in edx + ; cmp edx, edi //if the mask and the and'd value are the same, we're good + ; je >done + ; lea rsi, [>done] // leap 10 bytes forward + ; nop // jmp takes 10 bytes at most so we want to allocate 10 bytes buffer (?) + ; nop + ; nop + ; nop + ; nop + ; nop + ; nop + ; nop + ; nop + ; nop ;done: );}; } let mut ops = dynasmrt::VecAssembler::::new(0); shadow_check!(ops, bit); let ops_vec = ops.finalize().unwrap(); - ops_vec[..ops_vec.len() - 10].to_vec().into_boxed_slice() //???? + ops_vec[..ops_vec.len() - 10].to_vec().into_boxed_slice() //subtract 10 because } #[cfg(target_arch = "aarch64")] #[allow(clippy::unused_self)] - fn generate_shadow_check_blob(&mut self, bit: u32) -> Box<[u8]> { + fn generate_shadow_check_blob(&mut self, width: u32) -> Box<[u8]> { + /*x0 contains the shadow address + x0 and x1 are saved by the asan_check + The maximum size this supports is up to 25 bytes. This is because we load 4 bytes of the shadow value. And, in the case that we have a misaligned address with an offset of 7 into the word. For example, if we load 25 bytes from 0x1007 - [0x1007,0x101f], then we require the shadow values from 0x1000, 0x1008, 0x1010, and 0x1018 */ + let shadow_bit = self.allocator.shadow_bit(); macro_rules! shadow_check { - ($ops:ident, $bit:expr) => {dynasm!($ops + ($ops:ident, $width:expr) => {dynasm!($ops ; .arch aarch64 - +// ; brk #0xe ; stp x2, x3, [sp, #-0x10]! 
- ; mov x1, #0 + ; mov x1, xzr // ; add x1, xzr, x1, lsl #shadow_bit ; add x1, x1, x0, lsr #3 ; ubfx x1, x1, #0, #(shadow_bit + 1) ; mov x2, #1 ; add x1, x1, x2, lsl #shadow_bit - ; ldrh w1, [x1, #0] - ; and x0, x0, #7 - ; rev16 w1, w1 + ; ldr w1, [x1, #0] //w1 contains our shadow check + ; and x0, x0, #7 //x0 is the offset for unaligned accesses + ; rev32 x1, x1 ; rbit w1, w1 - ; lsr x1, x1, #16 - ; lsr x1, x1, x0 + ; lsr w1, w1, w0 //x1 now contains our shadow value ; ldp x2, x3, [sp], 0x10 - ; tbnz x1, #$bit, >done - + ; mov w0, #1 + ; add w0, wzr, w0, LSL #$width + ; sub w0, w0, #1 //x0 now contains our bitmask + ; and w1, w0, w1 //and the bitmask and the shadow value + ; cmp w0, w1 //our bitmask and shadow & mask must be the same + ; b.eq >done ; adr x1, >done ; nop // will be replaced by b to report ; done: @@ -1823,38 +1656,44 @@ impl AsanRuntime { } let mut ops = dynasmrt::VecAssembler::::new(0); - shadow_check!(ops, bit); + shadow_check!(ops, width); let ops_vec = ops.finalize().unwrap(); - ops_vec[..ops_vec.len() - 4].to_vec().into_boxed_slice() + ops_vec[..ops_vec.len() - 4].to_vec().into_boxed_slice() //we don't need the last nop so subtract by 4 } #[cfg(target_arch = "aarch64")] #[allow(clippy::unused_self)] - fn generate_shadow_check_exact_blob(&mut self, val: u64) -> Box<[u8]> { + fn generate_shadow_check_large_blob(&mut self, width: u32) -> Box<[u8]> { + //x0 contains the shadow address + //x0 and x1 are saved by the asan_check + //large blobs require 16 byte alignment as they are only possible with vector insns, so just abuse that + + //This is used for checking shadow blobs that are larger than 25 bytes + + assert!(width <= 64, "width must be <= 64"); + let shift = 64 - width; let shadow_bit = self.allocator.shadow_bit(); macro_rules! shadow_check_exact { - ($ops:ident, $val:expr) => {dynasm!($ops + ($ops:ident, $shift:expr) => {dynasm!($ops ; .arch aarch64 ; stp x2, x3, [sp, #-0x10]! 
- ; mov x1, #0 + ; mov x1, xzr // ; add x1, xzr, x1, lsl #shadow_bit ; add x1, x1, x0, lsr #3 ; ubfx x1, x1, #0, #(shadow_bit + 1) ; mov x2, #1 ; add x1, x1, x2, lsl #shadow_bit - ; ldrh w1, [x1, #0] - ; and x0, x0, #7 - ; rev16 w1, w1 - ; rbit w1, w1 - ; lsr x1, x1, #16 - ; lsr x1, x1, x0 - ; .dword -717536768 // 0xd53b4200 //mrs x0, NZCV - ; mov x2, $val - ; ands x1, x1, x2 + ; ldr x1, [x1, #0] //x1 contains our shadow check + ; rev64 x1, x1 + ; rbit x1, x1 //x1 now contains our shadow value ; ldp x2, x3, [sp], 0x10 - ; b.ne >done - + ; mov x0, xzr + ; sub x0, x0, #1 //gives us all 1s + ; lsr x0, x0, #$shift //x0 now contains our bitmask + ; and x1, x0, x1 //and the bitmask and the shadow value and put it in x1 + ; cmp x0, x1 //our bitmask and shadow & mask must be the same to ensure that the bytes are valid + ; b.eq >done ; adr x1, >done ; nop // will be replaced by b to report ; done: @@ -1862,7 +1701,7 @@ impl AsanRuntime { } let mut ops = dynasmrt::VecAssembler::::new(0); - shadow_check_exact!(ops, val); + shadow_check_exact!(ops, shift); let ops_vec = ops.finalize().unwrap(); ops_vec[..ops_vec.len() - 4].to_vec().into_boxed_slice() } @@ -1880,7 +1719,7 @@ impl AsanRuntime { ; .arch x64 ; report: ; mov rdi, [>self_regs_addr] // load self.regs into rdi - ; mov [rdi + 0x80], rsi // return address is loaded into rsi in generate_shadow_check_blob + ; mov [rdi + 0x80], rsi // return address is loaded into rsi in generate_shadow_check_blob. 
rsi is the address of done ; mov [rdi + 0x8], rbx ; mov [rdi + 0x20], rbp ; mov [rdi + 0x28], rsp @@ -1911,6 +1750,7 @@ impl AsanRuntime { ; mov [rsi + 0x38], rdi ; mov rdi, [>self_addr] + ; mov rcx, [>self_addr] ; mov rsi, [>trap_func] // Align the rsp to 16bytes boundary @@ -1952,9 +1792,9 @@ impl AsanRuntime { self.blob_check_mem_byte = Some(self.generate_shadow_check_blob(1)); self.blob_check_mem_halfword = Some(self.generate_shadow_check_blob(2)); - self.blob_check_mem_dword = Some(self.generate_shadow_check_blob(3)); - self.blob_check_mem_qword = Some(self.generate_shadow_check_blob(4)); - self.blob_check_mem_16bytes = Some(self.generate_shadow_check_blob(5)); + self.blob_check_mem_dword = Some(self.generate_shadow_check_blob(4)); + self.blob_check_mem_qword = Some(self.generate_shadow_check_blob(8)); + self.blob_check_mem_16bytes = Some(self.generate_shadow_check_blob(16)); } /// @@ -2069,19 +1909,19 @@ impl AsanRuntime { self.blob_report = Some(ops_report.finalize().unwrap().into_boxed_slice()); - self.blob_check_mem_byte = Some(self.generate_shadow_check_blob(0)); - self.blob_check_mem_halfword = Some(self.generate_shadow_check_blob(1)); - self.blob_check_mem_dword = Some(self.generate_shadow_check_blob(2)); - self.blob_check_mem_qword = Some(self.generate_shadow_check_blob(3)); - self.blob_check_mem_16bytes = Some(self.generate_shadow_check_blob(4)); - - self.blob_check_mem_3bytes = Some(self.generate_shadow_check_exact_blob(3)); - self.blob_check_mem_6bytes = Some(self.generate_shadow_check_exact_blob(6)); - self.blob_check_mem_12bytes = Some(self.generate_shadow_check_exact_blob(12)); - self.blob_check_mem_24bytes = Some(self.generate_shadow_check_exact_blob(24)); - self.blob_check_mem_32bytes = Some(self.generate_shadow_check_exact_blob(32)); - self.blob_check_mem_48bytes = Some(self.generate_shadow_check_exact_blob(48)); - self.blob_check_mem_64bytes = Some(self.generate_shadow_check_exact_blob(64)); + self.blob_check_mem_byte = 
Some(self.generate_shadow_check_blob(1)); + self.blob_check_mem_halfword = Some(self.generate_shadow_check_blob(2)); + self.blob_check_mem_dword = Some(self.generate_shadow_check_blob(4)); + self.blob_check_mem_qword = Some(self.generate_shadow_check_blob(8)); + self.blob_check_mem_16bytes = Some(self.generate_shadow_check_blob(16)); + + self.blob_check_mem_3bytes = Some(self.generate_shadow_check_blob(3)); //the below are all possible with vector intrinsics + self.blob_check_mem_6bytes = Some(self.generate_shadow_check_blob(6)); + self.blob_check_mem_12bytes = Some(self.generate_shadow_check_blob(12)); + self.blob_check_mem_24bytes = Some(self.generate_shadow_check_blob(24)); + self.blob_check_mem_32bytes = Some(self.generate_shadow_check_large_blob(32)); //this is possible with ldp q0, q1, [sp]. This must at least 16 byte aligned + self.blob_check_mem_48bytes = Some(self.generate_shadow_check_large_blob(48)); + self.blob_check_mem_64bytes = Some(self.generate_shadow_check_large_blob(64)); } /// Get the blob which implements the report funclet @@ -2276,7 +2116,7 @@ impl AsanRuntime { } /// Checks if the current instruction is interesting for address sanitization. 
- #[cfg(all(target_arch = "x86_64", unix))] + #[cfg(target_arch = "x86_64")] #[inline] #[must_use] #[allow(clippy::result_unit_err)] @@ -2285,7 +2125,15 @@ impl AsanRuntime { _address: u64, instr: &Insn, ) -> Option<(u8, X86Register, X86Register, u8, i32)> { - let cs_instr = frida_to_cs(decoder, instr); + let result = frida_to_cs(decoder, instr); + + if let Err(e) = result { + log::error!("{}", e); + return None; + } + + let cs_instr = result.unwrap(); + let mut operands = vec![]; for operand_idx in 0..cs_instr.operand_count() { operands.push(cs_instr.operand(operand_idx)); @@ -2308,7 +2156,7 @@ impl AsanRuntime { for operand in operands { if operand.is_memory() { - // log::trace!("{:#?}", operand) + // log::trace!("{:#?}", operand); // if we reach this point // because in x64 there's no mem to mem inst, just return the first memory operand @@ -2330,11 +2178,12 @@ impl AsanRuntime { #[inline] #[allow(clippy::too_many_lines)] #[allow(clippy::too_many_arguments)] - #[cfg(all(target_arch = "x86_64", unix))] + #[cfg(target_arch = "x86_64")] pub fn emit_shadow_check( &mut self, address: u64, output: &StalkerOutput, + instruction_size: usize, width: u8, basereg: X86Register, indexreg: X86Register, @@ -2369,17 +2218,17 @@ impl AsanRuntime { { let after_report_impl = writer.code_offset() + 2; - #[cfg(target_arch = "x86_64")] writer.put_jmp_near_label(after_report_impl); - #[cfg(target_arch = "aarch64")] - writer.put_b_label(after_report_impl); self.current_report_impl = writer.pc(); - #[cfg(unix)] writer.put_bytes(self.blob_report()); writer.put_label(after_report_impl); } + // if disp == 0x102 { + // log::trace!("BREAKING!"); + // writer.put_bytes(&[0xcc]); + // } /* Save registers that we'll use later in shadow_check_blob | addr | rip | @@ -2396,7 +2245,8 @@ impl AsanRuntime { writer.put_push_reg(X86Register::Rdx); writer.put_push_reg(X86Register::Rcx); writer.put_push_reg(X86Register::Rax); - + writer.put_push_reg(X86Register::Rbp); + 
writer.put_push_reg(X86Register::R8); /* Things are a bit different when Rip is either base register or index register. Suppose we have an instruction like @@ -2410,14 +2260,15 @@ impl AsanRuntime { match basereg { Some(reg) => match reg { X86Register::Rip => { - writer.put_mov_reg_address(X86Register::Rdi, true_rip); + writer + .put_mov_reg_address(X86Register::Rdi, true_rip + instruction_size as u64); } X86Register::Rsp => { // In this case rsp clobbered writer.put_lea_reg_reg_offset( X86Register::Rdi, X86Register::Rsp, - redzone_size + 0x8 * 6, + redzone_size + 0x8 * 7, ); } _ => { @@ -2432,18 +2283,19 @@ impl AsanRuntime { match indexreg { Some(reg) => match reg { X86Register::Rip => { - writer.put_mov_reg_address(X86Register::Rsi, true_rip); + writer + .put_mov_reg_address(X86Register::Rsi, true_rip + instruction_size as u64); } X86Register::Rdi => { // In this case rdi is already clobbered, so we want it from the stack (we pushed rdi onto stack before!) - writer.put_mov_reg_reg_offset_ptr(X86Register::Rsi, X86Register::Rsp, 0x20); + writer.put_mov_reg_reg_offset_ptr(X86Register::Rsi, X86Register::Rsp, 0x30); } X86Register::Rsp => { // In this case rsp is also clobbered writer.put_lea_reg_reg_offset( X86Register::Rsi, X86Register::Rsp, - redzone_size + 0x8 * 6, + redzone_size + 0x8 * 7, ); } _ => { @@ -2457,6 +2309,11 @@ impl AsanRuntime { // Scale if scale > 0 { + // if scale == 3 { + // if let Some(X86Register::R8) = indexreg { + // writer.put_bytes(&[0xcc]); + // } + // }kernel writer.put_shl_reg_u8(X86Register::Rsi, scale); } @@ -2468,7 +2325,6 @@ impl AsanRuntime { writer.put_push_reg(X86Register::Rsi); // save true_rip writer.put_push_reg(X86Register::Rdi); // save accessed_address - #[cfg(unix)] let checked: bool = match width { 1 => writer.put_bytes(self.blob_check_mem_byte()), 2 => writer.put_bytes(self.blob_check_mem_halfword()), @@ -2490,6 +2346,8 @@ impl AsanRuntime { writer.put_pop_reg(X86Register::Rdi); writer.put_pop_reg(X86Register::Rsi); + 
writer.put_pop_reg(X86Register::R8); + writer.put_pop_reg(X86Register::Rbp); writer.put_pop_reg(X86Register::Rax); writer.put_pop_reg(X86Register::Rcx); writer.put_pop_reg(X86Register::Rdx); @@ -2660,7 +2518,7 @@ impl AsanRuntime { Aarch64Register::X0, Aarch64Register::X0, u64::from(displacement_lo), - ); //sub x0, x0, #[displacement & 4095] + ); //sub x0, x0, #[displacement 496] } } else if displacement > 0 { #[allow(clippy::cast_sign_loss)] @@ -2675,12 +2533,12 @@ impl AsanRuntime { } else { let displacement_hi = displacement / 4096; let displacement_lo = displacement % 4096; - writer.put_bytes(&(0x91400000u32 | (displacement_hi << 10)).to_le_bytes()); + writer.put_bytes(&(0x91400000u32 | (displacement_hi << 10)).to_le_bytes()); //add x0, x0, #[displacement/4096] LSL#12 writer.put_add_reg_reg_imm( Aarch64Register::X0, Aarch64Register::X0, u64::from(displacement_lo), - ); + ); //add x0, x0, #[displacement % 4096] } } // Insert the check_shadow_mem code blob @@ -2749,9 +2607,11 @@ impl Default for AsanRuntime { suppressed_addresses: Vec::new(), skip_ranges: Vec::new(), continue_on_error: false, - shadow_check_func: None, + hooks: HashMap::new(), + hooks_enabled: false, #[cfg(target_arch = "aarch64")] eh_frame: [0; ASAN_EH_FRAME_DWORD_COUNT], + pc: None, } } } diff --git a/libafl_frida/src/asan/errors.rs b/libafl_frida/src/asan/errors.rs index a6d4482dca..44d1d9d9ea 100644 --- a/libafl_frida/src/asan/errors.rs +++ b/libafl_frida/src/asan/errors.rs @@ -91,7 +91,7 @@ pub(crate) enum AsanError { } impl AsanError { - fn description(&self) -> &str { + pub fn description(&self) -> &str { match self { AsanError::OobRead(_) => "heap out-of-bounds read", AsanError::OobWrite(_) => "heap out-of-bounds write", @@ -114,7 +114,7 @@ impl AsanError { #[derive(Debug, Clone, Serialize, Deserialize, SerdeAny)] pub struct AsanErrors { continue_on_error: bool, - errors: Vec, + pub(crate) errors: Vec, } impl AsanErrors { diff --git a/libafl_frida/src/asan/hook_funcs.rs 
b/libafl_frida/src/asan/hook_funcs.rs index 7d9589634f..d59dbfda44 100644 --- a/libafl_frida/src/asan/hook_funcs.rs +++ b/libafl_frida/src/asan/hook_funcs.rs @@ -3,7 +3,6 @@ use std::ffi::c_void; use backtrace::Backtrace; use libc::{c_char, wchar_t}; -use nix::libc::memset; use crate::{ alloc::Allocator, @@ -15,11 +14,602 @@ use crate::{ #[allow(clippy::not_unsafe_ptr_arg_deref)] impl AsanRuntime { + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_NtGdiCreateCompatibleDC(&mut self, _hdc: *const c_void) -> *mut c_void { + unsafe { self.allocator_mut().alloc(8, 8) } + } + + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_CreateThread( + &mut self, + thread_attributes: *const c_void, + stack_size: usize, + start_address: *const c_void, + parameter: *const c_void, + creation_flags: i32, + thread_id: *mut i32, + ) -> usize { + extern "system" { + fn CreateThread( + thread_attributes: *const c_void, + stack_size: usize, + start_address: *const c_void, + parameter: *const c_void, + creation_flags: i32, + thread_id: *mut i32, + ) -> usize; + } + unsafe { + CreateThread( + thread_attributes, + stack_size, + start_address, + parameter, + creation_flags, + thread_id, + ) + } + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_CreateFileMappingW( + &mut self, + file: usize, + file_mapping_attributes: *const c_void, + protect: i32, + maximum_size_high: u32, + maximum_size_low: u32, + name: *const c_void, + ) -> usize { + extern "system" { + fn CreateFileMappingW( + file: usize, + file_mapping_attributes: *const c_void, + protect: i32, + maximum_size_high: u32, + maximum_size_low: u32, + name: *const c_void, + ) -> usize; + } + winsafe::OutputDebugString("In CreateFileMapping\n"); + unsafe { + CreateFileMappingW( + file, + file_mapping_attributes, + protect, + maximum_size_high, + maximum_size_low, + name, + ) + } + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_LdrLoadDll( + &mut self, + 
search_path: *const c_void, + charecteristics: *const u32, + dll_name: *const c_void, + base_address: *mut *const c_void, + ) -> usize { + extern "system" { + fn LdrLoadDll( + search_path: *const c_void, + charecteristics: *const u32, + dll_name: *const c_void, + base_address: *mut *const c_void, + ) -> usize; + } + winsafe::OutputDebugString("LdrLoadDll"); + log::trace!("LdrLoadDll"); + let result = unsafe { LdrLoadDll(search_path, charecteristics, dll_name, base_address) }; + self.allocator_mut().unpoison_all_existing_memory(); + result + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_LdrpCallInitRoutine( + &mut self, + _base_address: *const c_void, + _reason: usize, + _context: usize, + _entry_point: usize, + ) -> usize { + winsafe::OutputDebugString("LdrpCallInitRoutine"); + // let result = unsafe { LdrLoadDll(path, file, flags,x )}; + // self.allocator_mut().unpoison_all_existing_memory(); + // result + 0 + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_LoadLibraryExW(&mut self, path: *const c_void, file: usize, flags: i32) -> usize { + log::trace!("Loaded library!"); + extern "system" { + fn LoadLibraryExW(path: *const c_void, file: usize, flags: i32) -> usize; + } + let result = unsafe { LoadLibraryExW(path, file, flags) }; + self.allocator_mut().unpoison_all_existing_memory(); + result + } + + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_RtlCreateHeap( + &mut self, + _flags: u32, + _heap_base: *const c_void, + _reserve_size: usize, + _commit_size: usize, + _lock: *const c_void, + _parameters: *const c_void, + ) -> *mut c_void { + 0xc0debeef as *mut c_void + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_RtlDestroyHeap(&mut self, _handle: *const c_void) -> *mut c_void { + std::ptr::null_mut() + } + + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_HeapAlloc(&mut self, _handle: *mut c_void, flags: u32, size: usize) -> *mut c_void { + let 
allocator = self.allocator_mut(); + let ret = unsafe { allocator.alloc(size, 8) }; + + if flags & 8 == 8 { + extern "system" { + fn memset(s: *mut c_void, c: i32, n: usize) -> *mut c_void; + } + unsafe { + memset(ret, 0, size); + } + } + if flags & 4 == 4 && ret == std::ptr::null_mut() { + unimplemented!(); + } + ret + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_RtlAllocateHeap( + &mut self, + _handle: *mut c_void, + flags: u32, + size: usize, + ) -> *mut c_void { + let allocator = self.allocator_mut(); + let ret = unsafe { allocator.alloc(size, 8) }; + + if flags & 8 == 8 { + extern "system" { + fn memset(s: *mut c_void, c: i32, n: usize) -> *mut c_void; + } + unsafe { + memset(ret, 0, size); + } + } + if flags & 4 == 4 && ret == std::ptr::null_mut() { + unimplemented!(); + } + ret + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_HeapReAlloc( + &mut self, + handle: *mut c_void, + flags: u32, + ptr: *mut c_void, + size: usize, + ) -> *mut c_void { + let allocator = self.allocator_mut(); + if !allocator.is_managed(ptr) { + extern "system" { + fn HeapReAlloc( + handle: *mut c_void, + flags: u32, + ptr: *mut c_void, + size: usize, + ) -> *mut c_void; + } + return unsafe { HeapReAlloc(handle, flags, ptr, size) }; + } + let ret = unsafe { + let ret = allocator.alloc(size, 8); + extern "system" { + fn memcpy(dst: *mut c_void, src: *const c_void, size: usize) -> *mut c_void; + } + memcpy(ret as *mut c_void, ptr, allocator.get_usable_size(ptr)); + allocator.release(ptr); + ret + }; + + if flags & 8 == 8 { + extern "system" { + fn memset(s: *mut c_void, c: i32, n: usize) -> *mut c_void; + } + unsafe { + memset(ret, 0, size); + } + } + if flags & 4 == 4 && ret == std::ptr::null_mut() { + unimplemented!(); + } + if flags & 0x10 == 0x10 && ret != ptr { + unimplemented!(); + } + ret + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_RtlReAllocateHeap( + &mut self, + handle: *mut c_void, + flags: u32, 
+ ptr: *mut c_void, + size: usize, + ) -> *mut c_void { + let allocator = self.allocator_mut(); + log::trace!("RtlReAllocateHeap({ptr:?}, {size:x})"); + if !allocator.is_managed(ptr) { + extern "system" { + fn HeapReAlloc( + handle: *mut c_void, + flags: u32, + ptr: *mut c_void, + size: usize, + ) -> *mut c_void; + } + return unsafe { HeapReAlloc(handle, flags, ptr, size) }; + } + let ret = unsafe { + let ret = allocator.alloc(size, 8); + extern "system" { + fn memcpy(dst: *mut c_void, src: *const c_void, size: usize) -> *mut c_void; + } + memcpy(ret as *mut c_void, ptr, allocator.get_usable_size(ptr)); + allocator.release(ptr); + ret + }; + + if flags & 8 == 8 { + extern "system" { + fn memset(s: *mut c_void, c: i32, n: usize) -> *mut c_void; + } + unsafe { + memset(ret, 0, size); + } + } + if flags & 4 == 4 && ret == std::ptr::null_mut() { + unimplemented!(); + } + if flags & 0x10 == 0x10 && ret != ptr { + unimplemented!(); + } + ret + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_RtlFreeHeap( + &mut self, + _handle: *mut c_void, + _flags: u32, + ptr: *mut c_void, + ) -> bool { + self.allocator_mut().is_managed(ptr) + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_RtlFreeHeap( + &mut self, + _handle: *mut c_void, + _flags: u32, + ptr: *mut c_void, + ) -> usize { + unsafe { self.allocator_mut().release(ptr) }; + 0 + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_HeapFree( + &mut self, + _handle: *mut c_void, + _flags: u32, + ptr: *mut c_void, + ) -> bool { + self.allocator_mut().is_managed(ptr) + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_HeapFree(&mut self, _handle: *mut c_void, _flags: u32, ptr: *mut c_void) -> bool { + unsafe { self.allocator_mut().release(ptr) }; + true + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_HeapSize( + &mut self, + _handle: *mut c_void, + _flags: u32, + ptr: *mut c_void, + ) -> bool 
{ + self.allocator_mut().is_managed(ptr) + } + + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_HeapSize(&mut self, _handle: *mut c_void, _flags: u32, ptr: *mut c_void) -> usize { + self.allocator().get_usable_size(ptr) + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_RtlSizeHeap( + &mut self, + _handle: *mut c_void, + _flags: u32, + ptr: *mut c_void, + ) -> bool { + self.allocator_mut().is_managed(ptr) + } + + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_RtlSizeHeap( + &mut self, + _handle: *mut c_void, + _flags: u32, + ptr: *mut c_void, + ) -> usize { + self.allocator().get_usable_size(ptr) + } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_RtlValidateHeap( + &mut self, + _handle: *mut c_void, + _flags: u32, + ptr: *mut c_void, + ) -> bool { + self.allocator_mut().is_managed(ptr) + } + + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_RtlValidateHeap( + &mut self, + _handle: *mut c_void, + _flags: u32, + _ptr: *mut c_void, + ) -> bool { + true + } + + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_LocalAlloc(&mut self, flags: u32, size: usize) -> *mut c_void { + let ret = unsafe { self.allocator_mut().alloc(size, 8) }; + + if flags & 0x40 == 0x40 { + extern "system" { + fn memset(s: *mut c_void, c: i32, n: usize) -> *mut c_void; + } + unsafe { + memset(ret, 0, size); + } + } + ret + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_LocalReAlloc(&mut self, mem: *mut c_void, size: usize, _flags: u32) -> *mut c_void { + unsafe { + let ret = self.allocator_mut().alloc(size, 0x8); + if mem != std::ptr::null_mut() && ret != std::ptr::null_mut() { + let old_size = self.allocator_mut().get_usable_size(mem); + let copy_size = if size < old_size { size } else { old_size }; + (mem as *mut u8).copy_to(ret as *mut u8, copy_size); + } + self.allocator_mut().release(mem); + ret + } + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_LocalFree(&mut 
self, mem: *mut c_void) -> bool { + let res = self.allocator_mut().is_managed(mem); + res + } + + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_LocalFree(&mut self, mem: *mut c_void) -> *mut c_void { + unsafe { self.allocator_mut().release(mem) }; + mem + } + + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_LocalHandle(&mut self, mem: *mut c_void) -> bool { + self.allocator_mut().is_managed(mem) + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_LocalHandle(&mut self, mem: *mut c_void) -> *mut c_void { + mem + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_LocalLock(&mut self, mem: *mut c_void) -> bool { + self.allocator_mut().is_managed(mem) + } + + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_LocalLock(&mut self, mem: *mut c_void) -> *mut c_void { + mem + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_LocalUnlock(&mut self, mem: *mut c_void) -> bool { + self.allocator_mut().is_managed(mem) + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_LocalUnlock(&mut self, _mem: *mut c_void) -> bool { + false + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_LocalSize(&mut self, mem: *mut c_void) -> bool { + self.allocator_mut().is_managed(mem) + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_LocalSize(&mut self, mem: *mut c_void) -> usize { + self.allocator_mut().get_usable_size(mem) + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_LocalFlags(&mut self, mem: *mut c_void) -> bool { + self.allocator_mut().is_managed(mem) + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_LocalFlags(&mut self, _mem: *mut c_void) -> u32 { + 0 + } + + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_GlobalAlloc(&mut self, flags: u32, size: usize) -> *mut c_void { + let ret = unsafe { self.allocator_mut().alloc(size, 8) }; + + if flags & 0x40 == 0x40 { + extern "system" { + fn memset(s: *mut c_void, c: i32, n: usize) -> 
*mut c_void; + } + unsafe { + memset(ret, 0, size); + } + } + ret + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_GlobalReAlloc( + &mut self, + mem: *mut c_void, + _flags: u32, + size: usize, + ) -> *mut c_void { + unsafe { + let ret = self.allocator_mut().alloc(size, 0x8); + if mem != std::ptr::null_mut() && ret != std::ptr::null_mut() { + let old_size = self.allocator_mut().get_usable_size(mem); + let copy_size = if size < old_size { size } else { old_size }; + (mem as *mut u8).copy_to(ret as *mut u8, copy_size); + } + self.allocator_mut().release(mem); + ret + } + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_GlobalFree(&mut self, mem: *mut c_void) -> bool { + self.allocator_mut().is_managed(mem) + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_GlobalFree(&mut self, mem: *mut c_void) -> *mut c_void { + unsafe { self.allocator_mut().release(mem) }; + mem + } + + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_GlobalHandle(&mut self, mem: *mut c_void) -> bool { + self.allocator_mut().is_managed(mem) + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_GlobalHandle(&mut self, mem: *mut c_void) -> *mut c_void { + mem + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_GlobalLock(&mut self, mem: *mut c_void) -> bool { + self.allocator_mut().is_managed(mem) + } + + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_GlobalLock(&mut self, mem: *mut c_void) -> *mut c_void { + mem + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_GlobalUnlock(&mut self, mem: *mut c_void) -> bool { + self.allocator_mut().is_managed(mem) + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_GlobalUnlock(&mut self, _mem: *mut c_void) -> bool { + false + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_GlobalSize(&mut self, mem: *mut c_void) -> bool { + self.allocator_mut().is_managed(mem) + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn 
hook_GlobalSize(&mut self, mem: *mut c_void) -> usize { + self.allocator_mut().get_usable_size(mem) + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_check_GlobalFlags(&mut self, mem: *mut c_void) -> bool { + self.allocator_mut().is_managed(mem) + } + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_GlobalFlags(&mut self, _mem: *mut c_void) -> u32 { + 0 + } + #[inline] pub fn hook_malloc(&mut self, size: usize) -> *mut c_void { unsafe { self.allocator_mut().alloc(size, 8) } } + #[inline] + pub fn hook_o_malloc(&mut self, size: usize) -> *mut c_void { + unsafe { self.allocator_mut().alloc(size, 8) } + } + #[allow(non_snake_case)] #[inline] pub fn hook__Znam(&mut self, size: usize) -> *mut c_void { @@ -58,7 +648,7 @@ impl AsanRuntime { pub fn hook__Znwm(&mut self, size: usize) -> *mut c_void { let result = unsafe { self.allocator_mut().alloc(size, 8) }; if result.is_null() { - extern "C" { + extern "system" { fn _ZSt17__throw_bad_allocv(); } @@ -86,7 +676,7 @@ impl AsanRuntime { pub fn hook__ZnwmSt11align_val_t(&mut self, size: usize, alignment: usize) -> *mut c_void { let result = unsafe { self.allocator_mut().alloc(size, alignment) }; if result.is_null() { - extern "C" { + extern "system" { fn _ZSt17__throw_bad_allocv(); } @@ -108,8 +698,16 @@ impl AsanRuntime { unsafe { self.allocator_mut().alloc(size, alignment) } } + #[allow(non_snake_case)] + #[inline] + pub fn hook__o_malloc(&mut self, size: usize) -> *mut c_void { + unsafe { self.allocator_mut().alloc(size, 8) } + } #[inline] pub fn hook_calloc(&mut self, nmemb: usize, size: usize) -> *mut c_void { + extern "system" { + fn memset(s: *mut c_void, c: i32, n: usize) -> *mut c_void; + } let ret = unsafe { self.allocator_mut().alloc(size * nmemb, 8) }; unsafe { memset(ret, 0, size * nmemb); @@ -117,6 +715,20 @@ impl AsanRuntime { ret } + #[allow(non_snake_case)] + #[inline] + pub fn hook__o_calloc(&mut self, nmemb: usize, size: usize) -> *mut c_void { + extern "system" { + fn memset(s: *mut 
c_void, c: i32, n: usize) -> *mut c_void; + } + let ret = unsafe { self.allocator_mut().alloc(size * nmemb, 8) }; + unsafe { + memset(ret, 0, size * nmemb); + } + ret + } + + #[allow(non_snake_case)] #[inline] #[allow(clippy::cmp_null)] pub fn hook_realloc(&mut self, ptr: *mut c_void, size: usize) -> *mut c_void { @@ -132,6 +744,37 @@ impl AsanRuntime { } } + #[allow(non_snake_case)] + #[inline] + #[allow(clippy::cmp_null)] + pub fn hook__o_realloc(&mut self, ptr: *mut c_void, size: usize) -> *mut c_void { + unsafe { + let ret = self.allocator_mut().alloc(size, 0x8); + if ptr != std::ptr::null_mut() && ret != std::ptr::null_mut() { + let old_size = self.allocator_mut().get_usable_size(ptr); + let copy_size = if size < old_size { size } else { old_size }; + (ptr as *mut u8).copy_to(ret as *mut u8, copy_size); + } + self.allocator_mut().release(ptr); + ret + } + } + + #[allow(non_snake_case)] + #[inline] + pub fn hook_check__o_free(&mut self, ptr: *mut c_void) -> bool { + self.allocator_mut().is_managed(ptr) + } + + #[allow(non_snake_case)] + #[inline] + #[allow(clippy::cmp_null)] + pub fn hook__o_free(&mut self, ptr: *mut c_void) -> usize { + if ptr != std::ptr::null_mut() { + unsafe { self.allocator_mut().release(ptr) } + } + 0 + } #[inline] pub fn hook_check_free(&mut self, ptr: *mut c_void) -> bool { self.allocator_mut().is_managed(ptr) @@ -139,10 +782,11 @@ impl AsanRuntime { #[inline] #[allow(clippy::cmp_null)] - pub fn hook_free(&mut self, ptr: *mut c_void) { + pub fn hook_free(&mut self, ptr: *mut c_void) -> usize { if ptr != std::ptr::null_mut() { unsafe { self.allocator_mut().release(ptr) } } + 0 } #[cfg(not(target_vendor = "apple"))] @@ -170,22 +814,48 @@ impl AsanRuntime { self.allocator_mut().get_usable_size(ptr) } + #[inline] + #[allow(non_snake_case)] + #[cfg(windows)] + pub fn hook_MapViewOfFile( + &mut self, + _handle: *const c_void, + _desired_access: u32, + _file_offset_high: u32, + _file_offset_low: u32, + size: usize, + ) -> *const c_void { + let 
original: extern "C" fn(*const c_void, u32, u32, u32, usize) -> *const c_void = + unsafe { std::mem::transmute(self.hooks.get(&"MapViewOfFile".to_string()).unwrap().0) }; + let ret = (original)( + _handle, + _desired_access, + _file_offset_high, + _file_offset_low, + size, + ); + self.unpoison(ret as usize, size); + ret + } + #[allow(non_snake_case)] #[allow(clippy::cmp_null)] #[inline] - pub fn hook__ZdaPv(&mut self, ptr: *mut c_void) { + pub fn hook__ZdaPv(&mut self, ptr: *mut c_void) -> usize { if ptr != std::ptr::null_mut() { unsafe { self.allocator_mut().release(ptr) } } + 0 } #[allow(non_snake_case)] #[allow(clippy::cmp_null)] #[inline] - pub fn hook__ZdaPvm(&mut self, ptr: *mut c_void, _ulong: u64) { + pub fn hook__ZdaPvm(&mut self, ptr: *mut c_void, _ulong: u64) -> usize { if ptr != std::ptr::null_mut() { unsafe { self.allocator_mut().release(ptr) } } + 0 } #[allow(non_snake_case)] @@ -196,19 +866,25 @@ impl AsanRuntime { ptr: *mut c_void, _ulong: u64, _alignment: usize, - ) { + ) -> usize { if ptr != std::ptr::null_mut() { unsafe { self.allocator_mut().release(ptr) } } + 0 } #[allow(non_snake_case)] #[allow(clippy::cmp_null)] #[inline] - pub fn hook__ZdaPvRKSt9nothrow_t(&mut self, ptr: *mut c_void, _nothrow: *const c_void) { + pub fn hook__ZdaPvRKSt9nothrow_t( + &mut self, + ptr: *mut c_void, + _nothrow: *const c_void, + ) -> usize { if ptr != std::ptr::null_mut() { unsafe { self.allocator_mut().release(ptr) } } + 0 } #[allow(non_snake_case)] @@ -219,37 +895,41 @@ impl AsanRuntime { ptr: *mut c_void, _alignment: usize, _nothrow: *const c_void, - ) { + ) -> usize { if ptr != std::ptr::null_mut() { unsafe { self.allocator_mut().release(ptr) } } + 0 } #[allow(non_snake_case)] #[allow(clippy::cmp_null)] #[inline] - pub fn hook__ZdaPvSt11align_val_t(&mut self, ptr: *mut c_void, _alignment: usize) { + pub fn hook__ZdaPvSt11align_val_t(&mut self, ptr: *mut c_void, _alignment: usize) -> usize { if ptr != std::ptr::null_mut() { unsafe { 
self.allocator_mut().release(ptr) } } + 0 } #[allow(non_snake_case)] #[allow(clippy::cmp_null)] #[inline] - pub fn hook__ZdlPv(&mut self, ptr: *mut c_void) { + pub fn hook__ZdlPv(&mut self, ptr: *mut c_void) -> usize { if ptr != std::ptr::null_mut() { unsafe { self.allocator_mut().release(ptr) } } + 0 } #[allow(non_snake_case)] #[allow(clippy::cmp_null)] #[inline] - pub fn hook__ZdlPvm(&mut self, ptr: *mut c_void, _ulong: u64) { + pub fn hook__ZdlPvm(&mut self, ptr: *mut c_void, _ulong: u64) -> usize { if ptr != std::ptr::null_mut() { unsafe { self.allocator_mut().release(ptr) } } + 0 } #[allow(non_snake_case)] @@ -260,19 +940,25 @@ impl AsanRuntime { ptr: *mut c_void, _ulong: u64, _alignment: usize, - ) { + ) -> usize { if ptr != std::ptr::null_mut() { unsafe { self.allocator_mut().release(ptr) } } + 0 } #[allow(non_snake_case)] #[allow(clippy::cmp_null)] #[inline] - pub fn hook__ZdlPvRKSt9nothrow_t(&mut self, ptr: *mut c_void, _nothrow: *const c_void) { + pub fn hook__ZdlPvRKSt9nothrow_t( + &mut self, + ptr: *mut c_void, + _nothrow: *const c_void, + ) -> usize { if ptr != std::ptr::null_mut() { unsafe { self.allocator_mut().release(ptr) } } + 0 } #[allow(non_snake_case)] @@ -283,19 +969,21 @@ impl AsanRuntime { ptr: *mut c_void, _alignment: usize, _nothrow: *const c_void, - ) { + ) -> usize { if ptr != std::ptr::null_mut() { unsafe { self.allocator_mut().release(ptr) } } + 0 } #[allow(non_snake_case)] #[allow(clippy::cmp_null)] #[inline] - pub fn hook__ZdlPvSt11align_val_t(&mut self, ptr: *mut c_void, _alignment: usize) { + pub fn hook__ZdlPvSt11align_val_t(&mut self, ptr: *mut c_void, _alignment: usize) -> usize { if ptr != std::ptr::null_mut() { unsafe { self.allocator_mut().release(ptr) } } + 0 } #[inline] @@ -308,7 +996,7 @@ impl AsanRuntime { fd: i32, offset: usize, ) -> *mut c_void { - extern "C" { + extern "system" { fn mmap( addr: *const c_void, length: usize, @@ -328,7 +1016,7 @@ impl AsanRuntime { #[inline] pub fn hook_munmap(&mut self, addr: *const 
c_void, length: usize) -> i32 { - extern "C" { + extern "system" { fn munmap(addr: *const c_void, length: usize) -> i32; } let res = unsafe { munmap(addr, length) }; @@ -338,15 +1026,20 @@ impl AsanRuntime { res } + #[inline] + #[allow(non_snake_case)] + pub fn hook__write(&mut self, fd: i32, buf: *const c_void, count: usize) -> usize { + self.hook_write(fd, buf, count) + } #[inline] pub fn hook_write(&mut self, fd: i32, buf: *const c_void, count: usize) -> usize { - extern "C" { + extern "system" { fn write(fd: i32, buf: *const c_void, count: usize) -> usize; } - if !(self.shadow_check_func().unwrap())(buf, count) { + if !self.allocator_mut().check_shadow(buf, count) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "write".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), buf as usize, count, Backtrace::new(), @@ -355,15 +1048,20 @@ impl AsanRuntime { unsafe { write(fd, buf, count) } } + #[inline] + #[allow(non_snake_case)] + pub fn hook__read(&mut self, fd: i32, buf: *mut c_void, count: usize) -> usize { + self.hook_read(fd, buf, count) + } #[inline] pub fn hook_read(&mut self, fd: i32, buf: *mut c_void, count: usize) -> usize { - extern "C" { + extern "system" { fn read(fd: i32, buf: *mut c_void, count: usize) -> usize; } - if !(self.shadow_check_func().unwrap())(buf, count) { + if !self.allocator_mut().check_shadow(buf, count) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "read".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), buf as usize, count, Backtrace::new(), @@ -374,13 +1072,13 @@ impl AsanRuntime { #[inline] pub fn hook_fgets(&mut self, s: *mut c_void, size: u32, stream: *mut c_void) -> *mut c_void { - extern "C" { + extern "system" { fn fgets(s: *mut c_void, size: u32, stream: *mut c_void) -> *mut c_void; } - if !(self.shadow_check_func().unwrap())(s, size as usize) { + if 
!self.allocator_mut().check_shadow(s, size as usize) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "fgets".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, size as usize, Backtrace::new(), @@ -391,22 +1089,22 @@ impl AsanRuntime { #[inline] pub fn hook_memcmp(&mut self, s1: *const c_void, s2: *const c_void, n: usize) -> i32 { - extern "C" { + extern "system" { fn memcmp(s1: *const c_void, s2: *const c_void, n: usize) -> i32; } - if !(self.shadow_check_func().unwrap())(s1, n) { + if !self.allocator_mut().check_shadow(s1, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "memcmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s1 as usize, n, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(s2, n) { + if !self.allocator_mut().check_shadow(s2, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "memcmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s2 as usize, n, Backtrace::new(), @@ -417,22 +1115,22 @@ impl AsanRuntime { #[inline] pub fn hook_memcpy(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { - extern "C" { + extern "system" { fn memcpy(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; } - if !(self.shadow_check_func().unwrap())(dest, n) { + if !self.allocator_mut().check_shadow(dest, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "memcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), dest as usize, n, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(src, n) { + if !self.allocator_mut().check_shadow(src, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "memcpy".to_string(), - 
self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), src as usize, n, Backtrace::new(), @@ -444,22 +1142,22 @@ impl AsanRuntime { #[inline] #[cfg(not(target_vendor = "apple"))] pub fn hook_mempcpy(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { - extern "C" { + extern "system" { fn mempcpy(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; } - if !(self.shadow_check_func().unwrap())(dest, n) { + if !self.allocator_mut().check_shadow(dest, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "mempcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), dest as usize, n, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(src, n) { + if !self.allocator_mut().check_shadow(src, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "mempcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), src as usize, n, Backtrace::new(), @@ -470,39 +1168,40 @@ impl AsanRuntime { #[inline] pub fn hook_memmove(&mut self, dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void { - extern "C" { + extern "system" { fn memmove(dest: *mut c_void, src: *const c_void, n: usize) -> *mut c_void; } - if !(self.shadow_check_func().unwrap())(dest, n) { + if !self.allocator_mut().check_shadow(dest, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "memmove".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), dest as usize, n, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(src, n) { + if !self.allocator_mut().check_shadow(src, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "memmove".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), src as 
usize, n, Backtrace::new(), ))); } + unsafe { memmove(dest, src, n) } } #[inline] pub fn hook_memset(&mut self, dest: *mut c_void, c: i32, n: usize) -> *mut c_void { - extern "C" { + extern "system" { fn memset(dest: *mut c_void, c: i32, n: usize) -> *mut c_void; } - if !(self.shadow_check_func().unwrap())(dest, n) { + if !self.allocator_mut().check_shadow(dest, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "memset".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), dest as usize, n, Backtrace::new(), @@ -513,13 +1212,13 @@ impl AsanRuntime { #[inline] pub fn hook_memchr(&mut self, s: *mut c_void, c: i32, n: usize) -> *mut c_void { - extern "C" { + extern "system" { fn memchr(s: *mut c_void, c: i32, n: usize) -> *mut c_void; } - if !(self.shadow_check_func().unwrap())(s, n) { + if !self.allocator_mut().check_shadow(s, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "memchr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, n, Backtrace::new(), @@ -531,13 +1230,13 @@ impl AsanRuntime { #[inline] #[cfg(not(target_vendor = "apple"))] pub fn hook_memrchr(&mut self, s: *mut c_void, c: i32, n: usize) -> *mut c_void { - extern "C" { + extern "system" { fn memrchr(s: *mut c_void, c: i32, n: usize) -> *mut c_void; } - if !(self.shadow_check_func().unwrap())(s, n) { + if !self.allocator_mut().check_shadow(s, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "memrchr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, n, Backtrace::new(), @@ -554,7 +1253,7 @@ impl AsanRuntime { needle: *const c_void, needlelen: usize, ) -> *mut c_void { - extern "C" { + extern "system" { fn memmem( haystack: *const c_void, haystacklen: usize, @@ -562,19 +1261,19 @@ impl AsanRuntime { needlelen: usize, ) -> *mut 
c_void; } - if !(self.shadow_check_func().unwrap())(haystack, haystacklen) { + if !self.allocator_mut().check_shadow(haystack, haystacklen) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "memmem".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), haystack as usize, haystacklen, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(needle, needlelen) { + if !self.allocator_mut().check_shadow(needle, needlelen) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "memmem".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), needle as usize, needlelen, Backtrace::new(), @@ -585,14 +1284,14 @@ impl AsanRuntime { #[cfg(not(target_os = "android"))] #[inline] - pub fn hook_bzero(&mut self, s: *mut c_void, n: usize) { - extern "C" { - fn bzero(s: *mut c_void, n: usize); + pub fn hook_bzero(&mut self, s: *mut c_void, n: usize) -> usize { + extern "system" { + fn bzero(s: *mut c_void, n: usize) -> usize; } - if !(self.shadow_check_func().unwrap())(s, n) { + if !self.allocator_mut().check_shadow(s, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "bzero".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, n, Backtrace::new(), @@ -603,14 +1302,14 @@ impl AsanRuntime { #[cfg(all(not(target_os = "android"), not(target_vendor = "apple")))] #[inline] - pub fn hook_explicit_bzero(&mut self, s: *mut c_void, n: usize) { - extern "C" { - fn explicit_bzero(s: *mut c_void, n: usize); + pub fn hook_explicit_bzero(&mut self, s: *mut c_void, n: usize) -> usize { + extern "system" { + fn explicit_bzero(s: *mut c_void, n: usize) -> usize; } - if !(self.shadow_check_func().unwrap())(s, n) { + if !self.allocator_mut().check_shadow(s, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( 
"explicit_bzero".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, n, Backtrace::new(), @@ -622,22 +1321,22 @@ impl AsanRuntime { #[cfg(not(target_os = "android"))] #[inline] pub fn hook_bcmp(&mut self, s1: *const c_void, s2: *const c_void, n: usize) -> i32 { - extern "C" { + extern "system" { fn bcmp(s1: *const c_void, s2: *const c_void, n: usize) -> i32; } - if !(self.shadow_check_func().unwrap())(s1, n) { + if !self.allocator_mut().check_shadow(s1, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "bcmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s1 as usize, n, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(s2, n) { + if !self.allocator_mut().check_shadow(s2, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "bcmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s2 as usize, n, Backtrace::new(), @@ -648,14 +1347,17 @@ impl AsanRuntime { #[inline] pub fn hook_strchr(&mut self, s: *mut c_char, c: i32) -> *mut c_char { - extern "C" { + extern "system" { fn strchr(s: *mut c_char, c: i32) -> *mut c_char; fn strlen(s: *const c_char) -> usize; } - if !(self.shadow_check_func().unwrap())(s as *const c_void, unsafe { strlen(s) }) { + if !self + .allocator_mut() + .check_shadow(s as *const c_void, unsafe { strlen(s) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strchr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, unsafe { strlen(s) }, Backtrace::new(), @@ -666,14 +1368,17 @@ impl AsanRuntime { #[inline] pub fn hook_strrchr(&mut self, s: *mut c_char, c: i32) -> *mut c_char { - extern "C" { + extern "system" { fn strrchr(s: *mut c_char, c: i32) -> *mut c_char; fn strlen(s: *const c_char) -> usize; 
} - if !(self.shadow_check_func().unwrap())(s as *const c_void, unsafe { strlen(s) }) { + if !self + .allocator_mut() + .check_shadow(s as *const c_void, unsafe { strlen(s) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strrchr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, unsafe { strlen(s) }, Backtrace::new(), @@ -684,23 +1389,29 @@ impl AsanRuntime { #[inline] pub fn hook_strcasecmp(&mut self, s1: *const c_char, s2: *const c_char) -> i32 { - extern "C" { + extern "system" { fn strcasecmp(s1: *const c_char, s2: *const c_char) -> i32; fn strlen(s: *const c_char) -> usize; } - if !(self.shadow_check_func().unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { + if !self + .allocator_mut() + .check_shadow(s1 as *const c_void, unsafe { strlen(s1) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strcasecmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s1 as usize, unsafe { strlen(s1) }, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(s2 as *const c_void, unsafe { strlen(s2) }) { + if !self + .allocator_mut() + .check_shadow(s2 as *const c_void, unsafe { strlen(s2) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strcasecmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s2 as usize, unsafe { strlen(s2) }, Backtrace::new(), @@ -711,22 +1422,22 @@ impl AsanRuntime { #[inline] pub fn hook_strncasecmp(&mut self, s1: *const c_char, s2: *const c_char, n: usize) -> i32 { - extern "C" { + extern "system" { fn strncasecmp(s1: *const c_char, s2: *const c_char, n: usize) -> i32; } - if !(self.shadow_check_func().unwrap())(s1 as *const c_void, n) { + if !self.allocator_mut().check_shadow(s1 as *const c_void, n) { 
AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strncasecmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s1 as usize, n, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(s2 as *const c_void, n) { + if !self.allocator_mut().check_shadow(s2 as *const c_void, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strncasecmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s2 as usize, n, Backtrace::new(), @@ -737,23 +1448,29 @@ impl AsanRuntime { #[inline] pub fn hook_strcat(&mut self, s1: *mut c_char, s2: *const c_char) -> *mut c_char { - extern "C" { + extern "system" { fn strcat(s1: *mut c_char, s2: *const c_char) -> *mut c_char; fn strlen(s: *const c_char) -> usize; } - if !(self.shadow_check_func().unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { + if !self + .allocator_mut() + .check_shadow(s1 as *const c_void, unsafe { strlen(s1) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strcat".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s1 as usize, unsafe { strlen(s1) }, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(s2 as *const c_void, unsafe { strlen(s2) }) { + if !self + .allocator_mut() + .check_shadow(s2 as *const c_void, unsafe { strlen(s2) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strcat".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s2 as usize, unsafe { strlen(s2) }, Backtrace::new(), @@ -764,23 +1481,29 @@ impl AsanRuntime { #[inline] pub fn hook_strcmp(&mut self, s1: *const c_char, s2: *const c_char) -> i32 { - extern "C" { + extern "system" { fn strcmp(s1: *const c_char, s2: *const c_char) -> i32; fn strlen(s: *const c_char) -> usize; } - if 
!(self.shadow_check_func().unwrap())(s1 as *const c_void, unsafe { strlen(s1) }) { + if !self + .allocator_mut() + .check_shadow(s1 as *const c_void, unsafe { strlen(s1) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strcmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s1 as usize, unsafe { strlen(s1) }, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(s2 as *const c_void, unsafe { strlen(s2) }) { + if !self + .allocator_mut() + .check_shadow(s2 as *const c_void, unsafe { strlen(s2) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strcmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s2 as usize, unsafe { strlen(s2) }, Backtrace::new(), @@ -791,23 +1514,29 @@ impl AsanRuntime { #[inline] pub fn hook_strncmp(&mut self, s1: *const c_char, s2: *const c_char, n: usize) -> i32 { - extern "C" { + extern "system" { fn strncmp(s1: *const c_char, s2: *const c_char, n: usize) -> i32; fn strnlen(s: *const c_char, n: usize) -> usize; } - if !(self.shadow_check_func().unwrap())(s1 as *const c_void, unsafe { strnlen(s1, n) }) { + if !self + .allocator_mut() + .check_shadow(s1 as *const c_void, unsafe { strnlen(s1, n) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strncmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s1 as usize, n, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(s2 as *const c_void, unsafe { strnlen(s2, n) }) { + if !self + .allocator_mut() + .check_shadow(s2 as *const c_void, unsafe { strnlen(s2, n) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strncmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s2 as usize, n, Backtrace::new(), @@ -818,23 
+1547,29 @@ impl AsanRuntime { #[inline] pub fn hook_strcpy(&mut self, dest: *mut c_char, src: *const c_char) -> *mut c_char { - extern "C" { + extern "system" { fn strcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; fn strlen(s: *const c_char) -> usize; } - if !(self.shadow_check_func().unwrap())(dest as *const c_void, unsafe { strlen(src) }) { + if !self + .allocator_mut() + .check_shadow(dest as *const c_void, unsafe { strlen(src) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "strcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), dest as usize, unsafe { strlen(src) }, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(src as *const c_void, unsafe { strlen(src) }) { + if !self + .allocator_mut() + .check_shadow(src as *const c_void, unsafe { strlen(src) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), src as usize, unsafe { strlen(src) }, Backtrace::new(), @@ -845,22 +1580,22 @@ impl AsanRuntime { #[inline] pub fn hook_strncpy(&mut self, dest: *mut c_char, src: *const c_char, n: usize) -> *mut c_char { - extern "C" { + extern "system" { fn strncpy(dest: *mut c_char, src: *const c_char, n: usize) -> *mut c_char; } - if !(self.shadow_check_func().unwrap())(dest as *const c_void, n) { + if !self.allocator_mut().check_shadow(dest as *const c_void, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "strncpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), dest as usize, n, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(src as *const c_void, n) { + if !self.allocator_mut().check_shadow(src as *const c_void, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( 
"strncpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), src as usize, n, Backtrace::new(), @@ -871,23 +1606,29 @@ impl AsanRuntime { #[inline] pub fn hook_stpcpy(&mut self, dest: *mut c_char, src: *const c_char) -> *mut c_char { - extern "C" { + extern "system" { fn stpcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; fn strlen(s: *const c_char) -> usize; } - if !(self.shadow_check_func().unwrap())(dest as *const c_void, unsafe { strlen(src) }) { + if !self + .allocator_mut() + .check_shadow(dest as *const c_void, unsafe { strlen(src) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "stpcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), dest as usize, unsafe { strlen(src) }, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(src as *const c_void, unsafe { strlen(src) }) { + if !self + .allocator_mut() + .check_shadow(src as *const c_void, unsafe { strlen(src) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "stpcpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), src as usize, unsafe { strlen(src) }, Backtrace::new(), @@ -896,17 +1637,22 @@ impl AsanRuntime { unsafe { stpcpy(dest, src) } } + #[inline] + #[allow(non_snake_case)] + pub fn hook__strdup(&mut self, s: *const c_char) -> *mut c_char { + self.hook_strdup(s) + } #[inline] pub fn hook_strdup(&mut self, s: *const c_char) -> *mut c_char { - extern "C" { + extern "system" { fn strlen(s: *const c_char) -> usize; fn strcpy(dest: *mut c_char, src: *const c_char) -> *mut c_char; } let size = unsafe { strlen(s) }; - if !(self.shadow_check_func().unwrap())(s as *const c_void, size) { + if !self.allocator_mut().check_shadow(s as *const c_void, size) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strdup".to_string(), - 
self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, unsafe { strlen(s) }, Backtrace::new(), @@ -922,14 +1668,14 @@ impl AsanRuntime { #[inline] pub fn hook_strlen(&mut self, s: *const c_char) -> usize { - extern "C" { + extern "system" { fn strlen(s: *const c_char) -> usize; } let size = unsafe { strlen(s) }; - if !(self.shadow_check_func().unwrap())(s as *const c_void, size) { + if !self.allocator_mut().check_shadow(s as *const c_void, size) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strlen".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, size, Backtrace::new(), @@ -940,14 +1686,14 @@ impl AsanRuntime { #[inline] pub fn hook_strnlen(&mut self, s: *const c_char, n: usize) -> usize { - extern "C" { + extern "system" { fn strnlen(s: *const c_char, n: usize) -> usize; } let size = unsafe { strnlen(s, n) }; - if !(self.shadow_check_func().unwrap())(s as *const c_void, size) { + if !self.allocator_mut().check_shadow(s as *const c_void, size) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strnlen".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, size, Backtrace::new(), @@ -958,26 +1704,29 @@ impl AsanRuntime { #[inline] pub fn hook_strstr(&mut self, haystack: *const c_char, needle: *const c_char) -> *mut c_char { - extern "C" { + extern "system" { fn strstr(haystack: *const c_char, needle: *const c_char) -> *mut c_char; fn strlen(s: *const c_char) -> usize; } - if !(self.shadow_check_func().unwrap())(haystack as *const c_void, unsafe { - strlen(haystack) - }) { + if !self + .allocator_mut() + .check_shadow(haystack as *const c_void, unsafe { strlen(haystack) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strstr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + 
self.real_address_for_stalked(self.pc()), haystack as usize, unsafe { strlen(haystack) }, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(needle as *const c_void, unsafe { strlen(needle) }) + if !self + .allocator_mut() + .check_shadow(needle as *const c_void, unsafe { strlen(needle) }) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strstr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), needle as usize, unsafe { strlen(needle) }, Backtrace::new(), @@ -992,26 +1741,29 @@ impl AsanRuntime { haystack: *const c_char, needle: *const c_char, ) -> *mut c_char { - extern "C" { + extern "system" { fn strcasestr(haystack: *const c_char, needle: *const c_char) -> *mut c_char; fn strlen(s: *const c_char) -> usize; } - if !(self.shadow_check_func().unwrap())(haystack as *const c_void, unsafe { - strlen(haystack) - }) { + if !self + .allocator_mut() + .check_shadow(haystack as *const c_void, unsafe { strlen(haystack) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strcasestr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), haystack as usize, unsafe { strlen(haystack) }, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(needle as *const c_void, unsafe { strlen(needle) }) + if !self + .allocator_mut() + .check_shadow(needle as *const c_void, unsafe { strlen(needle) }) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "strcasestr".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), needle as usize, unsafe { strlen(needle) }, Backtrace::new(), @@ -1022,14 +1774,17 @@ impl AsanRuntime { #[inline] pub fn hook_atoi(&mut self, s: *const c_char) -> i32 { - extern "C" { + extern "system" { fn atoi(s: *const c_char) -> i32; fn strlen(s: *const c_char) -> usize; } - if 
!(self.shadow_check_func().unwrap())(s as *const c_void, unsafe { strlen(s) }) { + if !self + .allocator_mut() + .check_shadow(s as *const c_void, unsafe { strlen(s) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "atoi".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, unsafe { strlen(s) }, Backtrace::new(), @@ -1041,14 +1796,17 @@ impl AsanRuntime { /// Hooks `atol` #[inline] pub fn hook_atol(&mut self, s: *const c_char) -> i32 { - extern "C" { + extern "system" { fn atol(s: *const c_char) -> i32; fn strlen(s: *const c_char) -> usize; } - if !(self.shadow_check_func().unwrap())(s as *const c_void, unsafe { strlen(s) }) { + if !self + .allocator_mut() + .check_shadow(s as *const c_void, unsafe { strlen(s) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "atol".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, unsafe { strlen(s) }, Backtrace::new(), @@ -1060,14 +1818,17 @@ impl AsanRuntime { /// Hooks `atoll` #[inline] pub fn hook_atoll(&mut self, s: *const c_char) -> i64 { - extern "C" { + extern "system" { fn atoll(s: *const c_char) -> i64; fn strlen(s: *const c_char) -> usize; } - if !(self.shadow_check_func().unwrap())(s as *const c_void, unsafe { strlen(s) }) { + if !self + .allocator_mut() + .check_shadow(s as *const c_void, unsafe { strlen(s) }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "atoll".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, unsafe { strlen(s) }, Backtrace::new(), @@ -1079,14 +1840,17 @@ impl AsanRuntime { /// Hooks `wcslen` #[inline] pub fn hook_wcslen(&mut self, s: *const wchar_t) -> usize { - extern "C" { + extern "system" { fn wcslen(s: *const wchar_t) -> usize; } let size = unsafe { wcslen(s) }; - if 
!(self.shadow_check_func().unwrap())(s as *const c_void, (size + 1) * 2) { + if !self + .allocator_mut() + .check_shadow(s as *const c_void, (size + 1) * 2) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "wcslen".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, (size + 1) * 2, Backtrace::new(), @@ -1098,27 +1862,29 @@ impl AsanRuntime { /// Hooks `wcscpy` #[inline] pub fn hook_wcscpy(&mut self, dest: *mut wchar_t, src: *const wchar_t) -> *mut wchar_t { - extern "C" { + extern "system" { fn wcscpy(dest: *mut wchar_t, src: *const wchar_t) -> *mut wchar_t; fn wcslen(s: *const wchar_t) -> usize; } - if !(self.shadow_check_func().unwrap())(dest as *const c_void, unsafe { - (wcslen(src) + 1) * 2 - }) { + if !self + .allocator_mut() + .check_shadow(dest as *const c_void, unsafe { (wcslen(src) + 1) * 2 }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "wcscpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), dest as usize, (unsafe { wcslen(src) } + 1) * 2, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(src as *const c_void, unsafe { - (wcslen(src) + 1) * 2 - }) { + if !self + .allocator_mut() + .check_shadow(src as *const c_void, unsafe { (wcslen(src) + 1) * 2 }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "wcscpy".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), src as usize, (unsafe { wcslen(src) } + 1) * 2, Backtrace::new(), @@ -1130,27 +1896,29 @@ impl AsanRuntime { /// Hooks `wcscmp` #[inline] pub fn hook_wcscmp(&mut self, s1: *const wchar_t, s2: *const wchar_t) -> i32 { - extern "C" { + extern "system" { fn wcscmp(s1: *const wchar_t, s2: *const wchar_t) -> i32; fn wcslen(s: *const wchar_t) -> usize; } - if !(self.shadow_check_func().unwrap())(s1 as *const c_void, unsafe 
{ - (wcslen(s1) + 1) * 2 - }) { + if !self + .allocator_mut() + .check_shadow(s1 as *const c_void, unsafe { (wcslen(s1) + 1) * 2 }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "wcscmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s1 as usize, (unsafe { wcslen(s1) } + 1) * 2, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(s2 as *const c_void, unsafe { - (wcslen(s2) + 1) * 2 - }) { + if !self + .allocator_mut() + .check_shadow(s2 as *const c_void, unsafe { (wcslen(s2) + 1) * 2 }) + { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgRead(( "wcscmp".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s2 as usize, (unsafe { wcslen(s2) } + 1) * 2, Backtrace::new(), @@ -1162,22 +1930,22 @@ impl AsanRuntime { #[cfg(target_vendor = "apple")] #[inline] pub fn hook_memset_pattern4(&mut self, s: *mut c_void, p4: *const c_void, n: usize) { - extern "C" { + extern "system" { fn memset_pattern4(s: *mut c_void, p4: *const c_void, n: usize); } - if !(self.shadow_check_func().unwrap())(s, n) { + if !self.allocator_mut().check_shadow(s, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "memset_pattern4".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, n, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(p4, n / 4) { + if !self.allocator_mut().check_shadow(p4, n / 4) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "memset_pattern4".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), p4 as usize, n / 4, Backtrace::new(), @@ -1189,22 +1957,22 @@ impl AsanRuntime { #[cfg(target_vendor = "apple")] #[inline] pub fn hook_memset_pattern8(&mut self, s: *mut c_void, p8: *const c_void, n: usize) { - extern "C" { 
+ extern "system" { fn memset_pattern8(s: *mut c_void, p8: *const c_void, n: usize); } - if !(self.shadow_check_func().unwrap())(s, n) { + if !self.allocator_mut().check_shadow(s, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "memset_pattern8".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, n, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(p8, n / 8) { + if !self.allocator_mut().check_shadow(p8, n / 8) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "memset_pattern8".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), p8 as usize, n / 8, Backtrace::new(), @@ -1216,22 +1984,22 @@ impl AsanRuntime { #[cfg(target_vendor = "apple")] #[inline] pub fn hook_memset_pattern16(&mut self, s: *mut c_void, p16: *const c_void, n: usize) { - extern "C" { + extern "system" { fn memset_pattern16(s: *mut c_void, p16: *const c_void, n: usize); } - if !(self.shadow_check_func().unwrap())(s, n) { + if !self.allocator_mut().check_shadow(s, n) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "memset_pattern16".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), s as usize, n, Backtrace::new(), ))); } - if !(self.shadow_check_func().unwrap())(p16, n / 16) { + if !self.allocator_mut().check_shadow(p16, n / 16) { AsanErrors::get_mut_blocking().report_error(AsanError::BadFuncArgWrite(( "memset_pattern16".to_string(), - self.real_address_for_stalked(AsanRuntime::pc()), + self.real_address_for_stalked(self.pc()), p16 as usize, n / 16, Backtrace::new(), diff --git a/libafl_frida/src/executor.rs b/libafl_frida/src/executor.rs index 7ede0b357a..78488d38f3 100644 --- a/libafl_frida/src/executor.rs +++ b/libafl_frida/src/executor.rs @@ -1,6 +1,6 @@ -#[cfg(all(unix, not(test)))] -use core::borrow::Borrow; use 
core::fmt::{self, Debug, Formatter}; +#[cfg(windows)] +use std::process::abort; use std::{ffi::c_void, marker::PhantomData}; use frida_gum::{ @@ -21,7 +21,7 @@ use libafl::{ }; use libafl_bolts::tuples::RefIndexable; -#[cfg(all(unix, not(test)))] +#[cfg(not(test))] use crate::asan::errors::AsanErrors; use crate::helper::{FridaInstrumentationHelper, FridaRuntimeTuple}; #[cfg(windows)] @@ -106,11 +106,13 @@ where self.stalker.deactivate(); } - #[cfg(all(unix, not(test)))] + #[cfg(not(test))] unsafe { - if !AsanErrors::get_mut_blocking().borrow().is_empty() { + if !AsanErrors::get_mut_blocking().is_empty() { log::error!("Crashing target as it had ASan errors"); libc::raise(libc::SIGABRT); + #[cfg(windows)] + abort(); } } self.helper.post_exec(input)?; @@ -206,6 +208,7 @@ where } } + log::info!("disable_excludes: {:}", helper.disable_excludes); if !helper.disable_excludes { for range in ranges.gaps(&(0..usize::MAX)) { log::info!("excluding range: {:x}-{:x}", range.start, range.end); diff --git a/libafl_frida/src/helper.rs b/libafl_frida/src/helper.rs index c27aece94a..7c73ec40cc 100644 --- a/libafl_frida/src/helper.rs +++ b/libafl_frida/src/helper.rs @@ -6,9 +6,8 @@ use std::{ rc::Rc, }; -#[cfg(unix)] -use frida_gum::instruction_writer::InstructionWriter; use frida_gum::{ + instruction_writer::InstructionWriter, stalker::{StalkerIterator, StalkerOutput, Transformer}, Gum, Module, ModuleDetails, ModuleMap, PageProtection, }; @@ -28,11 +27,9 @@ use yaxpeax_arm::armv8::a64::{ARMv8, InstDecoder}; #[cfg(target_arch = "x86_64")] use yaxpeax_x86::amd64::InstDecoder; -#[cfg(unix)] -use crate::asan::asan_rt::AsanRuntime; #[cfg(feature = "cmplog")] use crate::cmplog_rt::CmpLogRuntime; -use crate::{coverage_rt::CoverageRuntime, drcov_rt::DrCovRuntime}; +use crate::{asan::asan_rt::AsanRuntime, coverage_rt::CoverageRuntime, drcov_rt::DrCovRuntime}; #[cfg(target_vendor = "apple")] const ANONYMOUS_FLAG: MapFlags = MapFlags::MAP_ANON; @@ -275,6 +272,11 @@ impl 
FridaInstrumentationHelperBuilder { if stalker_enabled { for (i, module) in module_map.values().iter().enumerate() { + log::trace!( + "module: {:?} {:x}", + module.name(), + module.range().base_address().0 as usize + ); let range = module.range(); let start = range.base_address().0 as usize; ranges @@ -330,7 +332,7 @@ impl Default for FridaInstrumentationHelperBuilder { fn default() -> Self { Self { stalker_enabled: true, - disable_excludes: true, + disable_excludes: false, instrument_module_predicate: None, skip_module_predicate: Box::new(|module| { // Skip the instrumentation module to avoid recursion. @@ -445,7 +447,7 @@ where let runtimes = Rc::clone(runtimes); #[cfg(target_arch = "x86_64")] - let decoder = InstDecoder::minimal(); + let decoder = InstDecoder::default(); #[cfg(target_arch = "aarch64")] let decoder = ::Decoder::default(); @@ -459,7 +461,7 @@ where basic_block: StalkerIterator, output: &StalkerOutput, ranges: &Rc>>, - runtimes: &Rc>, + runtimes_unborrowed: &Rc>, decoder: InstDecoder, ) { let mut first = true; @@ -469,10 +471,10 @@ where let instr = instruction.instr(); let instr_size = instr.bytes().len(); let address = instr.address(); - // log::trace!("block @ {:x} transformed to {:x}", address, output.writer().pc()); - + // log::trace!("x - block @ {:x} transformed to {:x}", address, output.writer().pc()); + //the ASAN check needs to be done before the hook_rt check due to x86 insns such as call [mem] if ranges.borrow().contains_key(&(address as usize)) { - let mut runtimes = (*runtimes).borrow_mut(); + let mut runtimes = (*runtimes_unborrowed).borrow_mut(); if first { first = false; // log::info!( @@ -483,24 +485,29 @@ where if let Some(rt) = runtimes.match_first_type_mut::() { rt.emit_coverage_mapping(address, output); } - if let Some(_rt) = runtimes.match_first_type_mut::() { basic_block_start = address; } } - #[cfg(unix)] let res = if let Some(_rt) = runtimes.match_first_type_mut::() { AsanRuntime::asan_is_interesting_instruction(decoder, 
address, instr) } else { None }; - #[cfg(all(target_arch = "x86_64", unix))] + #[cfg(target_arch = "x86_64")] if let Some(details) = res { if let Some(rt) = runtimes.match_first_type_mut::() { rt.emit_shadow_check( - address, output, details.0, details.1, details.2, details.3, details.4, + address, + output, + instr.bytes().len(), + details.0, + details.1, + details.2, + details.3, + details.4, ); } } @@ -541,7 +548,6 @@ where } } - #[cfg(unix)] if let Some(rt) = runtimes.match_first_type_mut::() { rt.add_stalked_address( output.writer().pc() as usize - instr_size, @@ -556,7 +562,10 @@ where instruction.keep(); } if basic_block_size != 0 { - if let Some(rt) = runtimes.borrow_mut().match_first_type_mut::() { + if let Some(rt) = runtimes_unborrowed + .borrow_mut() + .match_first_type_mut::() + { log::trace!("{basic_block_start:#016X}:{basic_block_size:X}"); rt.drcov_basic_blocks.push(DrCovBasicBlock::new( basic_block_start as usize, diff --git a/libafl_frida/src/lib.rs b/libafl_frida/src/lib.rs index f284e4853c..9685aad7b3 100644 --- a/libafl_frida/src/lib.rs +++ b/libafl_frida/src/lib.rs @@ -64,10 +64,8 @@ Additional documentation is available in [the `LibAFL` book](https://aflplus.plu )] /// The frida-asan allocator -#[cfg(unix)] pub mod alloc; -#[cfg(unix)] pub mod asan; #[cfg(windows)] @@ -369,7 +367,7 @@ mod tests { use crate::{ asan::{ asan_rt::AsanRuntime, - errors::{AsanErrorsFeedback, AsanErrorsObserver}, + errors::{AsanErrors, AsanErrorsFeedback, AsanErrorsObserver}, }, coverage_rt::CoverageRuntime, executor::FridaInProcessExecutor, @@ -378,20 +376,56 @@ mod tests { static GUM: OnceLock = OnceLock::new(); + #[allow(clippy::too_many_lines)] unsafe fn test_asan(options: &FuzzerOptions) { // The names of the functions to run let tests = vec![ - ("LLVMFuzzerTestOneInput", 0), - ("heap_oob_read", 1), - ("heap_oob_write", 1), - ("heap_uaf_write", 1), - ("heap_uaf_read", 1), - ("malloc_heap_oob_read", 1), - ("malloc_heap_oob_write", 1), - ("malloc_heap_uaf_write", 
1), - ("malloc_heap_uaf_read", 1), + ("LLVMFuzzerTestOneInput", None), + ("heap_oob_read", Some("heap out-of-bounds read")), + ("heap_oob_write", Some("heap out-of-bounds write")), + ("heap_uaf_write", Some("heap use-after-free write")), + ("heap_uaf_read", Some("heap use-after-free read")), + ("malloc_heap_oob_read", Some("heap out-of-bounds read")), + ("malloc_heap_oob_write", Some("heap out-of-bounds write")), + ( + "malloc_heap_oob_write_0x12", + Some("heap out-of-bounds write"), + ), + ( + "malloc_heap_oob_write_0x14", + Some("heap out-of-bounds write"), + ), + ( + "malloc_heap_oob_write_0x17", + Some("heap out-of-bounds write"), + ), + ( + "malloc_heap_oob_write_0x17_int_at_0x16", + Some("heap out-of-bounds write"), + ), + ( + "malloc_heap_oob_write_0x17_int_at_0x15", + Some("heap out-of-bounds write"), + ), + ("malloc_heap_oob_write_0x17_int_at_0x13", None), + ( + "malloc_heap_oob_write_0x17_int_at_0x14", + Some("heap out-of-bounds write"), + ), + ("malloc_heap_uaf_write", Some("heap use-after-free write")), + ("malloc_heap_uaf_read", Some("heap use-after-free read")), ]; + //NOTE: RTLD_NOW is required on linux as otherwise the hooks will NOT work + + #[cfg(target_os = "linux")] + let lib = libloading::os::unix::Library::open( + Some(options.clone().harness.unwrap()), + libloading::os::unix::RTLD_NOW, + ) + .unwrap(); + + #[cfg(not(target_os = "linux"))] let lib = libloading::Library::new(options.clone().harness.unwrap()).unwrap(); let coverage = CoverageRuntime::new(); @@ -404,7 +438,7 @@ mod tests { // Run the tests for each function for test in tests { - let (function_name, err_cnt) = test; + let (function_name, expected_error) = test; log::info!("Testing with harness function {}", function_name); let mut corpus = InMemoryCorpus::::new(); @@ -415,7 +449,7 @@ mod tests { let rand = StdRand::with_seed(0); - let mut feedback = ConstFeedback::new(false); + let mut feedback = ConstFeedback::new(true); let asan_obs = 
AsanErrorsObserver::from_static_asan_errors(); @@ -446,6 +480,12 @@ mod tests { ); { + #[cfg(target_os = "linux")] + let target_func: libloading::os::unix::Symbol< + unsafe extern "C" fn(data: *const u8, size: usize) -> i32, + > = lib.get(function_name.as_bytes()).unwrap(); + + #[cfg(not(target_os = "linux"))] let target_func: libloading::Symbol< unsafe extern "C" fn(data: *const u8, size: usize) -> i32, > = lib.get(function_name.as_bytes()).unwrap(); @@ -473,19 +513,25 @@ mod tests { let mutator = StdScheduledMutator::new(tuple_list!(BitFlipMutator::new())); let mut stages = tuple_list!(StdMutationalStage::with_max_iterations(mutator, 1)); - // log::info!("Starting fuzzing!"); + log::info!("Starting fuzzing!"); fuzzer .fuzz_one(&mut stages, &mut executor, &mut state, &mut event_manager) .unwrap_or_else(|_| panic!("Error in fuzz_one")); log::info!("Done fuzzing! Got {} solutions", state.solutions().count()); + if let Some(expected_error) = expected_error { + assert_eq!(state.solutions().count(), 1); + if let Some(error) = AsanErrors::get_mut_blocking().errors.first() { + assert_eq!(error.description(), expected_error); + } + } else { + assert_eq!(state.solutions().count(), 0); + } } - assert_eq!(state.solutions().count(), err_cnt); } } #[test] - #[cfg(unix)] fn run_test_asan() { // Read RUST_LOG from the environment and set the log level accordingly (not using env_logger) // Note that in cargo test, the output of successfull tests is suppressed by default, @@ -505,7 +551,10 @@ mod tests { SimpleStdoutLogger::set_logger().unwrap(); // Check if the harness dynamic library is present, if not - skip the test - let test_harness = "test_harness.so"; + #[cfg(unix)] + let test_harness = "./test_harness.so"; + #[cfg(windows)] + let test_harness = ".\\test_harness.dll"; assert!( std::path::Path::new(test_harness).exists(), "Skipping test, {test_harness} not found" diff --git a/libafl_frida/src/utils.rs b/libafl_frida/src/utils.rs index b382b064aa..4385d36236 100644 --- 
a/libafl_frida/src/utils.rs +++ b/libafl_frida/src/utils.rs @@ -1,7 +1,8 @@ #[cfg(target_arch = "aarch64")] use frida_gum::instruction_writer::Aarch64Register; #[cfg(target_arch = "x86_64")] -use frida_gum::instruction_writer::X86Register; +use frida_gum::{instruction_writer::X86Register, CpuContext}; +use libafl::Error; #[cfg(target_arch = "aarch64")] use num_traits::cast::FromPrimitive; #[cfg(target_arch = "x86_64")] @@ -158,6 +159,30 @@ const X86_64_REGS: [(RegSpec, X86Register); 34] = [ (RegSpec::rip(), X86Register::Rip), ]; +/// Get the value of a register given a context +#[cfg(target_arch = "x86_64")] +pub fn get_register(context: &CpuContext, reg: X86Register) -> u64 { + match reg { + X86Register::Rax => context.rax(), + X86Register::Rbx => context.rbx(), + X86Register::Rcx => context.rcx(), + X86Register::Rdx => context.rdx(), + X86Register::Rdi => context.rdi(), + X86Register::Rsi => context.rsi(), + X86Register::Rsp => context.rsp(), + X86Register::Rbp => context.rbp(), + X86Register::R8 => context.r8(), + X86Register::R9 => context.r9(), + X86Register::R10 => context.r10(), + X86Register::R11 => context.r11(), + X86Register::R12 => context.r12(), + X86Register::R13 => context.r13(), + X86Register::R14 => context.r14(), + X86Register::R15 => context.r15(), + _ => 0, + } +} + /// The writer registers /// frida registers: /// capstone registers: @@ -176,9 +201,25 @@ pub fn writer_register(reg: RegSpec) -> X86Register { } /// Translates a frida instruction to a disassembled instruction. 
-#[cfg(all(target_arch = "x86_64", unix))] -pub(crate) fn frida_to_cs(decoder: InstDecoder, frida_insn: &frida_gum_sys::Insn) -> Instruction { - decoder.decode_slice(frida_insn.bytes()).unwrap() +#[cfg(target_arch = "x86_64")] +pub(crate) fn frida_to_cs( + decoder: InstDecoder, + frida_insn: &frida_gum_sys::Insn, +) -> Result<Instruction, Error> { + match decoder.decode_slice(frida_insn.bytes()) { + Ok(result) => Ok(result), + Err(error) => { + log::error!( + "{:?}: {:x}: {:?}", + error, + frida_insn.address(), + frida_insn.bytes() + ); + Err(Error::illegal_state( + "Instruction did not disassemble properly", + )) + } + } } #[cfg(target_arch = "x86_64")] @@ -225,6 +266,23 @@ pub fn operand_details(operand: &Operand) -> Option<(X86Register, X86Register, u } } +#[cfg(target_arch = "x86_64")] +/// Get the immediate value of the operand +pub fn immediate_value(operand: &Operand) -> Option<i64> { + match operand { + Operand::ImmediateI8(v) => Some(i64::from(*v)), + Operand::ImmediateU8(v) => Some(i64::from(*v)), + Operand::ImmediateI16(v) => Some(i64::from(*v)), + Operand::ImmediateI32(v) => Some(i64::from(*v)), + Operand::ImmediateU16(v) => Some(i64::from(*v)), + Operand::ImmediateU32(v) => Some(i64::from(*v)), + Operand::ImmediateI64(v) => Some(*v), + #[allow(clippy::cast_possible_wrap)] + Operand::ImmediateU64(v) => Some(*v as i64), + _ => None, + } +} + #[derive(Debug, Clone, Copy)] #[cfg(target_arch = "x86_64")] /// What kind of memory access this instruction has diff --git a/libafl_frida/src/windows_hooks.rs b/libafl_frida/src/windows_hooks.rs index f23ebb76dc..73c04c9121 100644 --- a/libafl_frida/src/windows_hooks.rs +++ b/libafl_frida/src/windows_hooks.rs @@ -29,7 +29,7 @@ pub fn initialize(gum: &Gum) { NativePointer(is_processor_feature_present_detour as *mut c_void), NativePointer(std::ptr::null_mut()), ) - .unwrap(); + .unwrap_or_else(|_| NativePointer(std::ptr::null_mut())); interceptor .replace( 
NativePointer(unhandled_exception_filter_detour as *mut c_void), NativePointer(std::ptr::null_mut()), ) - .unwrap(); + .unwrap_or_else(|_| NativePointer(std::ptr::null_mut())); unsafe extern "C" fn is_processor_feature_present_detour(feature: u32) -> bool { let result = match feature { diff --git a/libafl_frida/test_harness.cpp b/libafl_frida/test_harness.cpp index e1a592cb0c..4ea1369470 100644 --- a/libafl_frida/test_harness.cpp +++ b/libafl_frida/test_harness.cpp @@ -2,62 +2,132 @@ #include <stdio.h> #include <stdlib.h> -extern "C" int heap_uaf_read(const uint8_t *_data, size_t _size) { +#ifdef _MSC_VER + #include <windows.h> + +BOOL APIENTRY DllMain(HANDLE hModule, DWORD ul_reason_for_call, + LPVOID lpReserved) { + return TRUE; +} + + #define EXTERN __declspec(dllexport) extern "C" +#else + #define EXTERN +extern "C" { +#endif + +EXTERN int heap_uaf_read(const uint8_t *_data, size_t _size) { int *array = new int[100]; delete[] array; fprintf(stdout, "%d\n", array[5]); return 0; } -extern "C" int heap_uaf_write(const uint8_t *_data, size_t _size) { +EXTERN int heap_uaf_write(const uint8_t *_data, size_t _size) { int *array = new int[100]; delete[] array; array[5] = 1; return 0; } -extern "C" int heap_oob_read(const uint8_t *_data, size_t _size) { +EXTERN int heap_oob_read(const uint8_t *_data, size_t _size) { int *array = new int[100]; fprintf(stdout, "%d\n", array[100]); delete[] array; return 0; } -extern "C" int heap_oob_write(const uint8_t *_data, size_t _size) { +EXTERN int heap_oob_write(const uint8_t *_data, size_t _size) { int *array = new int[100]; array[100] = 1; delete[] array; return 0; } -extern "C" int malloc_heap_uaf_read(const uint8_t *_data, size_t _size) { +EXTERN int malloc_heap_uaf_read(const uint8_t *_data, size_t _size) { int *array = static_cast<int *>(malloc(100 * sizeof(int))); free(array); fprintf(stdout, "%d\n", array[5]); return 0; } -extern "C" int malloc_heap_uaf_write(const uint8_t *_data, size_t _size) { +EXTERN int malloc_heap_uaf_write(const uint8_t *_data, size_t 
_size) { int *array = static_cast<int *>(malloc(100 * sizeof(int))); free(array); array[5] = 1; return 0; } -extern "C" int malloc_heap_oob_read(const uint8_t *_data, size_t _size) { +EXTERN int malloc_heap_oob_read(const uint8_t *_data, size_t _size) { int *array = static_cast<int *>(malloc(100 * sizeof(int))); fprintf(stdout, "%d\n", array[100]); free(array); return 0; } -extern "C" int malloc_heap_oob_write(const uint8_t *_data, size_t _size) { +EXTERN int malloc_heap_oob_write(const uint8_t *_data, size_t _size) { int *array = static_cast<int *>(malloc(100 * sizeof(int))); array[100] = 1; free(array); return 0; } -extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { +EXTERN int malloc_heap_oob_write_0x12(const uint8_t *_data, size_t _size) { + char *array = static_cast<char *>(malloc(0x12)); + array[0x12] = 1; + free(array); + return 0; +} + +EXTERN int malloc_heap_oob_write_0x14(const uint8_t *_data, size_t _size) { + char *array = static_cast<char *>(malloc(0x14)); + array[0x14] = 1; + free(array); + return 0; +} + +EXTERN int malloc_heap_oob_write_0x17(const uint8_t *_data, size_t _size) { + char *array = static_cast<char *>(malloc(0x17)); + array[0x17] = 1; + free(array); + return 0; +} + +EXTERN int malloc_heap_oob_write_0x17_int_at_0x16(const uint8_t *_data, + size_t _size) { + char *array = static_cast<char *>(malloc(0x17)); + *(int *)(&array[0x16]) = 1; + free(array); + return 0; +} + +EXTERN int malloc_heap_oob_write_0x17_int_at_0x15(const uint8_t *_data, + size_t _size) { + char *array = static_cast<char *>(malloc(0x17)); + *(int *)(&array[0x15]) = 1; + free(array); + return 0; +} +EXTERN int malloc_heap_oob_write_0x17_int_at_0x14(const uint8_t *_data, + size_t _size) { + char *array = static_cast<char *>(malloc(0x17)); + *(int *)(&array[0x14]) = 1; + free(array); + return 0; +} + +EXTERN int malloc_heap_oob_write_0x17_int_at_0x13(const uint8_t *_data, + size_t _size) { + char *array = static_cast<char *>(malloc(0x17)); + *(int *)(&array[0x13]) = 1; + free(array); + return 0; +} + +EXTERN int 
LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { // abort(); return 0; } + +#ifndef _MSC_VER +} +#endif diff --git a/libafl_targets/build.rs b/libafl_targets/build.rs index c67a6f6e37..d1ba79984c 100644 --- a/libafl_targets/build.rs +++ b/libafl_targets/build.rs @@ -200,10 +200,11 @@ fn main() { } } + let target_family = std::env::var("CARGO_CFG_TARGET_FAMILY").unwrap(); + #[cfg(feature = "forkserver")] { - #[cfg(unix)] - { + if target_family == "unix" { println!("cargo:rerun-if-changed=src/forkserver.c"); cc::Build::new() @@ -212,8 +213,8 @@ fn main() { } } - #[cfg(all(feature = "windows_asan", windows))] - { + #[cfg(feature = "windows_asan")] + if target_family == "windows" { println!("cargo:rerun-if-changed=src/windows_asan.c"); cc::Build::new() diff --git a/libafl_targets/src/cmplog.c b/libafl_targets/src/cmplog.c index 9a2b175204..ffaec50f3b 100644 --- a/libafl_targets/src/cmplog.c +++ b/libafl_targets/src/cmplog.c @@ -15,9 +15,17 @@ void *__libafl_asan_region_is_poisoned(void *beg, size_t size) { return NULL; } - #pragma comment( \ - linker, \ - "/alternatename:__asan_region_is_poisoned=__libafl_asan_region_is_poisoned") + #if defined(__clang__) && defined(_MSC_VER) +void *__asan_region_is_poisoned(void *beg, size_t size) { + (void)beg; + (void)size; + return NULL; +} + #else + #pragma comment( \ + linker, \ + "/alternatename:__asan_region_is_poisoned=__libafl_asan_region_is_poisoned") + #endif #elif defined(__unix__) || (defined(__APPLE__) && defined(__MACH__))