Drive protection user bits on TL #2383
DCache.scala

@@ -4,6 +4,7 @@ package freechips.rocketchip.rocket

import Chisel._
import Chisel.ImplicitConversions._
import freechips.rocketchip.amba._
import freechips.rocketchip.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.diplomaticobjectmodel.model.OMSRAM
@@ -551,6 +552,20 @@ class DCacheModule(outer: DCache) extends HellaCacheModule(outer) {

      Mux(s2_req.cmd === M_PWR, putpartial,
      Mux(!s2_read, put, atomics))))

    // Drive APROT Bits
    tl_out_a.bits.user.lift(AMBAProt).foreach { x =>
      val user_bit_cacheable = edge.manager.supportsAcquireTFast(access_address, a_size)

      x.privileged := s2_req.dprv === PRV.M || user_bit_cacheable
      x.cacheable := user_bit_cacheable

      // Following are always tied off
      x.fetch := false.B
      x.secure := true.B
      x.bufferable := false.B
      x.modifiable := false.B

Review comment: I wonder whether bufferable/modifiable should be !user_bit_cacheable?

    }

    // Set pending bits for outstanding TileLink transaction
    val a_sel = UIntToOH(a_source, maxUncachedInFlight+mmioOffset) >> mmioOffset
    when (tl_out_a.fire()) {
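A note on the mechanism, since it is split across the two files in this PR: the user.lift(AMBAProt) call above only returns Some(...) when the AMBAProt request field has been negotiated on the client node (the ICache change below shows that side via requestFields); otherwise the foreach body is skipped and nothing is driven. Below is a minimal sketch of the two halves together, using AMBAProt and AMBAProtField from freechips.rocketchip.amba as in this PR; the node and signal names are illustrative, not taken from the diff.

    // Declaration side: negotiate the AMBAProt user field on a TileLink client node.
    val exampleNode = TLClientNode(Seq(TLMasterPortParameters.v1(
      clients       = Seq(TLMasterParameters.v1(name = "example-client")),
      requestFields = Seq(AMBAProtField()))))

    // Drive side (inside the LazyModuleImp): lift returns Some only because the field
    // was declared above, so clients without AMBAProt skip these assignments entirely.
    val (tl, edge) = exampleNode.out(0)
    tl.a.bits.user.lift(AMBAProt).foreach { prot =>
      prot.privileged := true.B
      prot.cacheable  := false.B
    }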
ICache.scala
@@ -5,6 +5,7 @@ package freechips.rocketchip.rocket

import Chisel._
import Chisel.ImplicitConversions._
import freechips.rocketchip.amba._
import freechips.rocketchip.config.Parameters
import freechips.rocketchip.diplomacy._
import freechips.rocketchip.tile._
@@ -52,9 +53,12 @@ class ICacheErrors(implicit p: Parameters) extends CoreBundle()(p)

class ICache(val icacheParams: ICacheParams, val hartId: Int)(implicit p: Parameters) extends LazyModule {
  lazy val module = new ICacheModule(this)

Removed:

  val masterNode = TLClientNode(Seq(TLMasterPortParameters.v1(Seq(TLMasterParameters.v1(
    sourceId = IdRange(0, 1 + icacheParams.prefetch.toInt), // 0=refill, 1=hint
    name = s"Core ${hartId} ICache")))))

Added:

  val useVM = p(TileKey).core.useVM
  val masterNode = TLClientNode(Seq(TLMasterPortParameters.v1(
    clients = Seq(TLMasterParameters.v1(
      sourceId = IdRange(0, 1 + icacheParams.prefetch.toInt), // 0=refill, 1=hint
      name = s"Core ${hartId} ICache")),
    requestFields = useVM.option(Seq()).getOrElse(Seq(AMBAProtField())))))

  val size = icacheParams.nSets * icacheParams.nWays * icacheParams.blockBytes
  val itim_control_offset = size - icacheParams.nSets * icacheParams.blockBytes
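The requestFields expression in the new masterNode is fairly dense. Assuming useVM.option is rocket-chip's usual Boolean-to-Option helper (b.option(x) yields Some(x) when b is true, None otherwise), it unfolds to the sketch below: the AMBAProt user field is only attached when the core is configured without virtual memory.

    // Equivalent, more explicit form of the requestFields argument above (sketch):
    //   useVM == true  -> Seq()                 no AMBAProt request field
    //   useVM == false -> Seq(AMBAProtField())  ICache drives the protection bits
    val requestFields = if (useVM) Seq() else Seq(AMBAProtField())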
@@ -120,6 +124,8 @@ class ICacheBundle(val outer: ICache) extends CoreBundle()(outer.p) {

  val clock_enabled = Bool(INPUT)
  val keep_clock_enabled = Bool(OUTPUT)

  val privileged = Bool(INPUT)
}

class ICacheModule(outer: ICache) extends LazyModuleImp(outer)
@@ -406,6 +412,7 @@ class ICacheModule(outer: ICache) extends LazyModuleImp(outer)

      fromSource = UInt(0),
      toAddress = (refill_paddr >> blockOffBits) << blockOffBits,
      lgSize = lgCacheBlockBytes)._2

    if (cacheParams.prefetch) {
      val (crosses_page, next_block) = Split(refill_paddr(pgIdxBits-1, blockOffBits) +& 1, pgIdxBits-blockOffBits)
      when (tl_out.a.fire()) {
@@ -436,6 +443,19 @@ class ICacheModule(outer: ICache) extends LazyModuleImp(outer)

      ccover(!refill_valid && (tl_out.d.fire() && !refill_one_beat), "PREFETCH_D_AFTER_MISS_D", "I$ prefetch resolves after miss")
      ccover(tl_out.a.fire() && hint_outstanding, "PREFETCH_D_AFTER_MISS_A", "I$ prefetch resolves after second miss")
    }

    // Drive APROT information
    tl_out.a.bits.user.lift(AMBAProt).foreach { x =>
      val user_bit_cacheable = edge_out.manager.supportsAcquireTFast(refill_paddr, lgCacheBlockBytes.U)

      x.privileged := io.privileged || user_bit_cacheable // privileged if machine mode or memory port

Review thread on the line above:
- Oh, I see the comment here. So we are assuming everything cacheable is going to the memory port? If not, this comment is a bit misleading.
- Yes, currently we are assuming any cacheable thing is going to the memory port. During a short discussion that I had with @hcook he mentioned that it's possible this may change in the future if we add other types of ports.
- It seems it should be the opposite for safety, i.e. if you cannot figure out the exact privilege level, you should assume the lowest level of privilege.
- I don't understand ARM's reasoning behind their decision, but we are making this change to be more compatible with ARM behavior. So I think for that reason we should follow the same thing.
- I am confused. Since we cache everything on fetch, why is privileged not just tied to true?
- @terpstra, @aswaterman posed a similar question but was unsure.

      x.cacheable := user_bit_cacheable

      // Following are always tied off
      x.fetch := true.B
      x.secure := true.B
      x.bufferable := false.B
      x.modifiable := false.B
    }
    tl_out.b.ready := Bool(true)
    tl_out.c.valid := Bool(false)
    tl_out.e.valid := Bool(false)
Review thread:
- So all cacheable accesses are also by definition privileged?
- The relevant point from the AMBA spec is that if a master cannot generate accurate protection information, it is recommended that we mark the transaction as privileged. Because caches like the L2 can merge requests from multiple different privilege modes, we can't accurately provide the protection information.
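To make the two positions in this thread concrete, here is a small sketch (not code from the PR) of the policy the change follows versus the safety-first alternative raised above. It is written as if it sat inside the user.lift(AMBAProt).foreach { x => ... } block from the diff; knowsExactPriv and reqIsMachineMode are hypothetical signals standing in for "we can attribute the access to a single privilege mode" and "that mode is M-mode".

    // Hypothetical signals, for illustration only.
    val knowsExactPriv   = Wire(Bool())  // can the access be attributed to one privilege mode?
    val reqIsMachineMode = Wire(Bool())  // that mode is M-mode

    // Policy taken by this PR (per the AMBA recommendation quoted above):
    // default to privileged when accurate protection information is unavailable.
    x.privileged := Mux(knowsExactPriv, reqIsMachineMode, true.B)

    // Alternative raised in review: when unsure, assume the lowest privilege.
    // x.privileged := Mux(knowsExactPriv, reqIsMachineMode, false.B)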