diff options
| author | Prashanth Mundkur | 2018-06-07 12:56:29 -0700 |
|---|---|---|
| committer | Prashanth Mundkur | 2018-06-07 13:13:23 -0700 |
| commit | 910a5d2976e9599cddaaac9549e790b78c1240dd (patch) | |
| tree | 7cd799cedfa4cd2566330fcd8592b80830525e1d /riscv | |
| parent | b96a2672bebac3331c539ddbffaa678f43704d5f (diff) | |
Update physical memory and address translation for MMIO.
- Assume for now that atomic accesses are only to memory regions, to leave
their effects unchanged.
- The top-level mem_read and mem_write functions for physical memory now
have rreg and wreg effects due to MMIO (reads/writes to device registers).
It would be nice to have a separate construct for non-CPU-register state to avoid
polluting the footprint.
- Assume for now that page-table walks access physical memory regions only, and
not MMIO regions. Leave a fixme note there to address this later, perhaps when
PMP/PMA is added.
Diffstat (limited to 'riscv')
| -rw-r--r-- | riscv/riscv_mem.sail | 73 | ||||
| -rw-r--r-- | riscv/riscv_vmem.sail | 3 |
2 files changed, 56 insertions, 20 deletions
diff --git a/riscv/riscv_mem.sail b/riscv/riscv_mem.sail index 10fd98fe..0fc9ddbc 100644 --- a/riscv/riscv_mem.sail +++ b/riscv/riscv_mem.sail @@ -3,13 +3,34 @@ function is_aligned_addr (addr : xlenbits, width : atom('n)) -> forall 'n. bool = unsigned(addr) % width == 0 -function checked_mem_read(t : ReadType, addr : xlenbits, width : atom('n)) -> forall 'n. MemoryOpResult(bits(8 * 'n)) = +// only used for actual memory regions, to avoid MMIO effects +function phys_mem_read(t : ReadType, addr : xlenbits, width : atom('n)) -> forall 'n. MemoryOpResult(bits(8 * 'n)) = match (t, __RISCV_read(addr, width)) { (Instruction, None()) => MemException(E_Fetch_Access_Fault), (Data, None()) => MemException(E_Load_Access_Fault), (_, Some(v)) => MemValue(v) } +function checked_mem_read(t : ReadType, addr : xlenbits, width : atom('n)) -> forall 'n. MemoryOpResult(bits(8 * 'n)) = { + /* Consult physical memory map to dispatch MMIO and (TODO) handle PMP/PMA. */ + if within_phys_mem(addr, width) + then phys_mem_read(t, addr, width) + /* treat MMIO regions as not executable for now. + TODO: this should actually come from PMP/PMA. */ + else if t == Data then { + if within_clint(addr, width) + then clint_load(addr, width) + /* todo: handle constraint on 'n + else if within_htif_readable(addr, width) + then htif_load(addr, width) + */ + else MemException(E_Load_Access_Fault) + } + else MemException(E_Load_Access_Fault) +} + +/* FIXME: We assume atomic accesses are only done to memory-backed regions. MMIO is not modeled. */ + val MEMr : forall 'n. (xlenbits, atom('n)) -> MemoryOpResult(bits(8 * 'n)) effect {rmem} val MEMr_acquire : forall 'n. (xlenbits, atom('n)) -> MemoryOpResult(bits(8 * 'n)) effect {rmem} val MEMr_strong_acquire : forall 'n. (xlenbits, atom('n)) -> MemoryOpResult(bits(8 * 'n)) effect {rmem} @@ -17,20 +38,21 @@ val MEMr_reserved : forall 'n. (xlenbits, atom('n)) -> MemoryOpRe val MEMr_reserved_acquire : forall 'n. 
(xlenbits, atom('n)) -> MemoryOpResult(bits(8 * 'n)) effect {rmem} val MEMr_reserved_strong_acquire : forall 'n. (xlenbits, atom('n)) -> MemoryOpResult(bits(8 * 'n)) effect {rmem} -function MEMr (addr, width) = checked_mem_read(Data, addr, width) -function MEMr_acquire (addr, width) = checked_mem_read(Data, addr, width) -function MEMr_strong_acquire (addr, width) = checked_mem_read(Data, addr, width) -function MEMr_reserved (addr, width) = checked_mem_read(Data, addr, width) -function MEMr_reserved_acquire (addr, width) = checked_mem_read(Data, addr, width) -function MEMr_reserved_strong_acquire (addr, width) = checked_mem_read(Data, addr, width) +function MEMr (addr, width) = phys_mem_read(Data, addr, width) +function MEMr_acquire (addr, width) = phys_mem_read(Data, addr, width) +function MEMr_strong_acquire (addr, width) = phys_mem_read(Data, addr, width) +function MEMr_reserved (addr, width) = phys_mem_read(Data, addr, width) +function MEMr_reserved_acquire (addr, width) = phys_mem_read(Data, addr, width) +function MEMr_reserved_strong_acquire (addr, width) = phys_mem_read(Data, addr, width) -val mem_read : forall 'n. (xlenbits, atom('n), bool, bool, bool) -> MemoryOpResult(bits(8 * 'n)) effect {rmem, escape} +/* NOTE: The rreg effect is due to MMIO. */ +val mem_read : forall 'n, 'n > 0. 
(xlenbits, atom('n), bool, bool, bool) -> MemoryOpResult(bits(8 * 'n)) effect {rmem, rreg, escape} function mem_read (addr, width, aq, rl, res) = { if (aq | res) & (~ (is_aligned_addr(addr, width))) then MemException(E_Load_Addr_Align) else match (aq, rl, res) { - (false, false, false) => MEMr(addr, width), + (false, false, false) => checked_mem_read(Data, addr, width), (true, false, false) => MEMr_acquire(addr, width), (false, false, true) => MEMr_reserved(addr, width), (true, false, true) => MEMr_reserved_acquire(addr, width), @@ -71,11 +93,24 @@ function mem_write_ea (addr, width, aq, rl, con) = { } } -function checked_mem_write(addr : xlenbits, width : atom('n), data: bits(8 * 'n)) -> forall 'n. MemoryOpResult(unit) = +// only used for actual memory regions, to avoid MMIO effects +function phys_mem_write(addr : xlenbits, width : atom('n), data: bits(8 * 'n)) -> forall 'n. MemoryOpResult(unit) = if (__RISCV_write(addr, width, data)) then MemValue(()) else MemException(E_SAMO_Access_Fault) +function checked_mem_write(addr : xlenbits, width : atom('n), data: bits(8 * 'n)) -> forall 'n, 'n > 0. MemoryOpResult(unit) = + /* Consult physical memory map to dispatch MMIO and (TODO) handle PMP/PMA. */ + if within_phys_mem(addr, width) + then phys_mem_write(addr, width, data) + else if within_clint(addr, width) + then clint_store(addr, width, data) + else if within_htif_writable(addr, width) & sizeof('n) <= 8 // FIXME + then htif_store(addr, width, data) + else MemException(E_SAMO_Access_Fault) + +/* FIXME: We assume atomic accesses are only done to memory-backed regions. MMIO is not modeled. */ + val MEMval : forall 'n. (xlenbits, atom('n), bits(8 * 'n)) -> MemoryOpResult(unit) effect {wmv} val MEMval_release : forall 'n. (xlenbits, atom('n), bits(8 * 'n)) -> MemoryOpResult(unit) effect {wmv} val MEMval_strong_release : forall 'n. (xlenbits, atom('n), bits(8 * 'n)) -> MemoryOpResult(unit) effect {wmv} @@ -83,21 +118,21 @@ val MEMval_conditional : forall 'n. 
(xlenbits, atom('n), bits(8 * val MEMval_conditional_release : forall 'n. (xlenbits, atom('n), bits(8 * 'n)) -> MemoryOpResult(unit) effect {wmv} val MEMval_conditional_strong_release : forall 'n. (xlenbits, atom('n), bits(8 * 'n)) -> MemoryOpResult(unit) effect {wmv} -function MEMval (addr, width, data) = checked_mem_write(addr, width, data) -function MEMval_release (addr, width, data) = checked_mem_write(addr, width, data) -function MEMval_strong_release (addr, width, data) = checked_mem_write(addr, width, data) -function MEMval_conditional (addr, width, data) = checked_mem_write(addr, width, data) -function MEMval_conditional_release (addr, width, data) = checked_mem_write(addr, width, data) -function MEMval_conditional_strong_release (addr, width, data) = checked_mem_write(addr, width, data) - +function MEMval (addr, width, data) = phys_mem_write(addr, width, data) +function MEMval_release (addr, width, data) = phys_mem_write(addr, width, data) +function MEMval_strong_release (addr, width, data) = phys_mem_write(addr, width, data) +function MEMval_conditional (addr, width, data) = phys_mem_write(addr, width, data) +function MEMval_conditional_release (addr, width, data) = phys_mem_write(addr, width, data) +function MEMval_conditional_strong_release (addr, width, data) = phys_mem_write(addr, width, data) -val mem_write_value : forall 'n. (xlenbits, atom('n), bits(8 * 'n), bool, bool, bool) -> MemoryOpResult(unit) effect {wmv, escape} +/* NOTE: The wreg effect is due to MMIO. */ +val mem_write_value : forall 'n, 'n > 0. 
(xlenbits, atom('n), bits(8 * 'n), bool, bool, bool) -> MemoryOpResult(unit) effect {wmv, wreg, escape} function mem_write_value (addr, width, value, aq, rl, con) = { if (rl | con) & (~ (is_aligned_addr(addr, width))) then MemException(E_SAMO_Addr_Align) else match (aq, rl, con) { - (false, false, false) => MEMval(addr, width, value), + (false, false, false) => checked_mem_write(addr, width, value), (false, true, false) => MEMval_release(addr, width, value), (false, false, true) => MEMval_conditional(addr, width, value), (false, true, true) => MEMval_conditional_release(addr, width, value), diff --git a/riscv/riscv_vmem.sail b/riscv/riscv_vmem.sail index 4fb7b5d5..d5efe252 100644 --- a/riscv/riscv_vmem.sail +++ b/riscv/riscv_vmem.sail @@ -131,7 +131,8 @@ function walk39(vaddr, ac, priv, mxr, do_sum, ptb, level, global) -> PTW_Result let pt_ofs : paddr39 = shiftl(EXTZ(shiftr(va.VPNi(), (level * SV39_LEVEL_BITS))[(SV39_LEVEL_BITS - 1) .. 0]), PTE39_LOG_SIZE); let pte_addr = ptb + pt_ofs; - match (checked_mem_read(Data, EXTZ(pte_addr), 8)) { + /* FIXME: we assume here that walks only access memory-backed addresses. */ + match (phys_mem_read(Data, EXTZ(pte_addr), 8)) { MemException(_) => PTW_Failure(PTW_Access), MemValue(v) => { let pte = Mk_SV39_PTE(v); |
