Skip to content

Commit

Permalink
fix: always read valid TLB entries while computing hash with uarch
Browse files Browse the repository at this point in the history
  • Loading branch information
edubart committed Jul 26, 2024
1 parent 91f6ea4 commit 3d36632
Show file tree
Hide file tree
Showing 3 changed files with 25 additions and 14 deletions.
6 changes: 6 additions & 0 deletions src/machine.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1496,7 +1496,13 @@ void machine::mark_write_tlb_dirty_pages(void) const {
const tlb_hot_entry &tlbhe = m_s.tlb.hot[TLB_WRITE][i];
if (tlbhe.vaddr_page != TLB_INVALID_PAGE) {
const tlb_cold_entry &tlbce = m_s.tlb.cold[TLB_WRITE][i];
if (tlbce.pma_index >= m_s.pmas.size()) {
throw std::runtime_error{"could not mark dirty page for a TLB entry: TLB is corrupt"};
}
pma_entry &pma = m_s.pmas[tlbce.pma_index];
if (!pma.contains(tlbce.paddr_page, PMA_PAGE_SIZE)) {
throw std::runtime_error{"could not mark dirty page for a TLB entry: TLB is corrupt"};
}
pma.mark_dirty_page(tlbce.paddr_page - pma.get_start());
}
}
Expand Down
18 changes: 9 additions & 9 deletions src/uarch-bridge.h
Original file line number Diff line number Diff line change
Expand Up @@ -598,24 +598,24 @@ class uarch_bridge {
switch (fieldoff) {
case offsetof(tlb_hot_entry, vaddr_page):
tlbhe.vaddr_page = val;
// Update vh_offset
if (val != TLB_INVALID_PAGE) {
const pma_entry &pma = find_pma_entry<uint64_t>(s, tlbce.paddr_page);
assert(pma.get_istart_M()); // TLB only works for memory mapped PMAs
const unsigned char *hpage =
pma.get_memory().get_host_memory() + (tlbce.paddr_page - pma.get_start());
tlbhe.vh_offset = cast_ptr_to_addr<uint64_t>(hpage) - tlbhe.vaddr_page;
}
return true;
default:
// Other fields like vh_offset contain host data, and cannot be written
return false;
}
} else {
switch (fieldoff) {
case offsetof(tlb_cold_entry, paddr_page): {
case offsetof(tlb_cold_entry, paddr_page):
tlbce.paddr_page = val;
// Update vh_offset
const pma_entry &pma = find_pma_entry<uint64_t>(s, tlbce.paddr_page);
assert(pma.get_istart_M()); // TLB only works for memory mapped PMAs
const unsigned char *hpage =
pma.get_memory().get_host_memory() + (tlbce.paddr_page - pma.get_start());
tlb_hot_entry &tlbhe = s.tlb.hot[etype][eidx];
tlbhe.vh_offset = cast_ptr_to_addr<uint64_t>(hpage) - tlbhe.vaddr_page;
return true;
}
case offsetof(tlb_cold_entry, pma_index):
tlbce.pma_index = val;
return true;
Expand Down
15 changes: 10 additions & 5 deletions uarch/uarch-machine-state-access.h
Original file line number Diff line number Diff line change
Expand Up @@ -709,8 +709,8 @@ class uarch_machine_state_access : public i_state_access<uarch_machine_state_acc
template <TLB_entry_type ETYPE>
unsigned char *do_replace_tlb_entry(uint64_t vaddr, uint64_t paddr, uarch_pma_entry &pma) {
uint64_t eidx = tlb_get_entry_index(vaddr);
volatile tlb_hot_entry &tlbhe = do_get_tlb_hot_entry<ETYPE>(eidx);
volatile tlb_cold_entry &tlbce = do_get_tlb_entry_cold<ETYPE>(eidx);
volatile tlb_hot_entry &tlbhe = do_get_tlb_hot_entry<ETYPE>(eidx);
// Mark page that was on TLB as dirty so we know to update the Merkle tree
if constexpr (ETYPE == TLB_WRITE) {
if (tlbhe.vaddr_page != TLB_INVALID_PAGE) {
Expand All @@ -720,11 +720,16 @@ class uarch_machine_state_access : public i_state_access<uarch_machine_state_acc
}
uint64_t vaddr_page = vaddr & ~PAGE_OFFSET_MASK;
uint64_t paddr_page = paddr & ~PAGE_OFFSET_MASK;
tlbhe.vaddr_page = vaddr_page;
// The paddr_page field must be written only after vaddr_page is written,
// because the uarch memory bridge reads vaddr_page to compute vh_offset when updating paddr_page.
tlbce.paddr_page = paddr_page;
// Both pma_index and paddr_page MUST BE written while the entry's state is invalidated,
// otherwise TLB entry may be read in an incomplete state when computing root hash
// while stepping over this function.
// To do this we first invalidate TLB state before these fields are written to "lock",
// and "unlock" by writing a valid vaddr_page.
tlbhe.vaddr_page = TLB_INVALID_PAGE; // "lock", DO NOT OPTIMIZE OUT THIS LINE
tlbce.pma_index = static_cast<uint64_t>(pma.get_index());
tlbce.paddr_page = paddr_page;
// The write to vaddr_page MUST BE the last TLB entry write.
tlbhe.vaddr_page = vaddr_page; // "unlock"
// Note that we can't write the correct vh_offset value here, because it depends on a host pointer,
// however the uarch memory bridge will take care of updating it.
return cast_addr_to_ptr<unsigned char*>(paddr_page);
Expand Down

0 comments on commit 3d36632

Please sign in to comment.