Skip to content

Commit

Permalink
Fix improper page index handling
Browse files Browse the repository at this point in the history
  • Loading branch information
bane9 committed Aug 29, 2024
1 parent 141a963 commit feed287
Show file tree
Hide file tree
Showing 4 changed files with 30 additions and 20 deletions.
4 changes: 2 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -124,8 +124,8 @@ The full list is: `test_rvi test_rvm test_rva test_rvmi test_rvsi`
| Platform | Compatible | Comments |
|-----------------|------------|------------------------------------|
| Windows amd64   | ✅         |                                    |
| Ubuntu amd64    | ✅         |                                    |
| MacOS amd64     | ❌         | Compiles, but MAP_FIXED seems to be unsupported on newer MacOS' |

## Note

Expand Down
34 changes: 22 additions & 12 deletions src/frontend/code_pages.rs
Original file line number Diff line number Diff line change
@@ -1,40 +1,50 @@
use hashbrown::HashMap;

use crate::backend::common::HostEncodedInsn;
use crate::xmem::{self, AllocationError, PageState};

pub struct CodePages {
xmem: Vec<xmem::CodePage>,
xmem: HashMap<usize, xmem::CodePage>,
idx: usize,
}

impl CodePages {
/// Creates an empty `CodePages` container with no allocated pages.
pub fn new() -> CodePages {
    CodePages {
        xmem: HashMap::new(),
        idx: 0,
    }
}

/// Returns a mutable reference to the code page registered under `idx`.
///
/// # Panics
/// Panics if no page with that index exists (never allocated, or already
/// removed via `remove_code_page`).
pub fn get_code_page(&mut self, idx: usize) -> &mut xmem::CodePage {
    self.xmem
        .get_mut(&idx)
        .expect("get_code_page: no code page with this index")
}

/// Allocates a fresh code page and returns it together with its index.
///
/// Indices come from a monotonically increasing counter and are never
/// reused, so an index stays valid until `remove_code_page` is called for
/// it — even if other pages are removed in the meantime.
pub fn alloc_code_page(&mut self) -> (&mut xmem::CodePage, usize) {
    let idx = self.idx;
    self.idx += 1;

    // `idx` is fresh, so this always inserts; the entry API gives us the
    // mutable reference with a single hash lookup (insert + get_mut is two).
    let page = self.xmem.entry(idx).or_insert_with(xmem::CodePage::new);

    (page, idx)
}

/// Appends the host-encoded instruction `insn` to the code page at `idx`.
///
/// Returns the underlying page's `AllocationError` if it cannot grow.
///
/// # Panics
/// Panics if no page with that index exists.
pub fn apply_insn(&mut self, idx: usize, insn: HostEncodedInsn) -> Result<(), AllocationError> {
    self.xmem
        .get_mut(&idx)
        .expect("apply_insn: no code page with this index")
        .push(insn.as_slice())
}

/// Deallocates the code page at `idx` and removes it from the map.
///
/// Removing the entry first and deallocating the owned value avoids a
/// second hash lookup (get_mut + remove would look the key up twice).
///
/// # Panics
/// Panics if no page with that index exists.
pub fn remove_code_page(&mut self, idx: usize) {
    let mut page = self
        .xmem
        .remove(&idx)
        .expect("remove_code_page: no code page with this index");
    page.dealloc();
}

/// Transitions every allocated code page to the protection `state`.
///
/// # Panics
/// Panics if any underlying page-protection change fails.
pub fn mark_all_pages(&mut self, state: PageState) {
    // Keys are not needed here, so iterate values directly instead of
    // tuple-indexing `iter_mut()` entries with `.1`.
    for page in self.xmem.values_mut() {
        match state {
            PageState::ReadWrite => page.mark_rw().unwrap(),
            PageState::ReadExecute => page.mark_rx().unwrap(),
            PageState::Invalid => page.mark_invalid().unwrap(),
        }
    }
}
Expand Down
4 changes: 2 additions & 2 deletions src/frontend/exec_core.rs
Original file line number Diff line number Diff line change
Expand Up @@ -116,8 +116,8 @@ impl ExecCore {
ReturnStatus::ReturnAccessViolation => {
let mut guest_exception_pc: Option<&InsnMappingData> = None;
let likely_offset = BackendCoreImpl::fastmem_violation_likely_offset();
let likely_offset_lower = likely_offset - 32;
let likely_offset_upper = likely_offset + 32;
let likely_offset_lower = likely_offset - 4;
let likely_offset_upper = likely_offset + 4;

let addr = ret.exception_address as *mut u8;

Expand Down
8 changes: 4 additions & 4 deletions src/xmem/page_allocator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -141,10 +141,10 @@ pub mod posix_page_allocator {
}
}

pub fn free_pages(_ptr: *mut u8, _npages: usize) {
// unsafe {
// libc::munmap(ptr as *mut _, npages * PAGE_SIZE);
// }
pub fn free_pages(ptr: *mut u8, npages: usize) {
unsafe {
libc::munmap(ptr as *mut _, npages * PAGE_SIZE);
}
}

pub fn mark_page(
Expand Down

0 comments on commit feed287

Please sign in to comment.