earlgrey/epmp.rs
1// Licensed under the Apache License, Version 2.0 or the MIT License.
2// SPDX-License-Identifier: Apache-2.0 OR MIT
3// Copyright Tock Contributors 2022.
4
5//! The EarlGrey SoC ePMP implementation.
6//!
7//! Refer to the main [`EarlGreyEPMP`] struct documentation.
8
9use core::cell::Cell;
10use core::fmt;
11use core::marker::PhantomData;
12use kernel::platform::mpu;
13use kernel::utilities::registers::FieldValue;
14use rv32i::csr;
15use rv32i::pmp::{
16 format_pmp_entries, pmpcfg_octet, NAPOTRegionSpec, TORRegionSpec, TORUserPMP, TORUserPMPCFG,
17};
18
19// ---------- EarlGrey ePMP implementation named constants ---------------------
20//
21// The ePMP implementation (in part) relies on these constant values. Simply
22// changing them here may break the implementation below.
23const PMP_ENTRIES: usize = 16;
24const PMP_ENTRIES_OVER_TWO: usize = 8;
25const TOR_USER_REGIONS_DEBUG_ENABLE: usize = 4;
26const TOR_USER_REGIONS_DEBUG_DISABLE: usize = 4;
27const TOR_USER_ENTRIES_OFFSET_DEBUG_ENABLE: usize = 0;
28const TOR_USER_ENTRIES_OFFSET_DEBUG_DISABLE: usize = 4;
29
30// ---------- EarlGrey ePMP memory region wrapper types ------------------------
31//
32// These types exist primarily to avoid argument confusion in the
33// [`EarlGreyEPMP`] constructor, which accepts the addresses of these memory
34// regions as arguments. They further encode whether a region must adhere to the
35// `NAPOT` or `TOR` addressing mode constraints:
36
/// The EarlGrey SoC's flash memory region address range.
38///
39/// Configured in the PMP as a `NAPOT` region.
40#[derive(Copy, Clone, Debug)]
41pub struct FlashRegion(pub NAPOTRegionSpec);
42
/// The EarlGrey SoC's RAM region address range.
44///
45/// Configured in the PMP as a `NAPOT` region.
46#[derive(Copy, Clone, Debug)]
47pub struct RAMRegion(pub NAPOTRegionSpec);
48
/// The EarlGrey SoC's MMIO region address range.
50///
51/// Configured in the PMP as a `NAPOT` region.
52#[derive(Copy, Clone, Debug)]
53pub struct MMIORegion(pub NAPOTRegionSpec);
54
/// The EarlGrey SoC's PMP region specification for the kernel `.text` section.
56///
57/// This is to be made accessible to machine-mode as read-execute. Configured in
58/// the PMP as a `TOR` region.
59#[derive(Copy, Clone, Debug)]
60pub struct KernelTextRegion(pub TORRegionSpec);
61
/// The EarlGrey SoC's RISC-V Debug Manager memory region.
63///
64/// Configured in the PMP as a read/write/execute `NAPOT` region. Because R/W/X
65/// regions are not supported in machine-mode lockdown (MML) mode, to enable
66/// JTAG debugging, the generic [`EPMPDebugConfig`] argument must be set to
/// [`EPMPDebugEnable`], which will configure the ePMP to operate without
/// machine-mode lockdown (MML), but still with the machine-mode whitelist
/// policy (MMWP) enabled.
70#[derive(Copy, Clone, Debug)]
71pub struct RVDMRegion(pub NAPOTRegionSpec);
72
73// ---------- EarlGrey SoC ePMP JTAG Debugging Configuration -------------------
74
75/// EarlGrey SoC ePMP JTAG Debugging Configuration
76///
77/// The EarlGrey SoC includes a RISC-V Debug Manager mapped to a NAPOT-aligned
78/// memory region. To use a JTAG-debugger with the EarlGrey SoC, this region
79/// needs to be allowed as R/W/X in the ePMP, at least for machine-mode.
80/// However, the RISC-V ePMP does not support R/W/X regions when in machine-mode
81/// lockdown (MML) mode. Furthermore, with the machine-mode whitelist policy
82/// (MMWP) enabled, machine-mode (the kernel) must be given explicit access for
83/// any memory regions to be accessed.
84///
85/// Thus, to enable debugger access, the following changes have to be made in
86/// the EarlGrey ePMP from its default locked-down configuration:
87///
88/// - Machine-Mode Lockdown (MML) must not be enabled
89///
90/// - A locked (machine-mode) PMP memory region must be allocated for the RISC-V
/// Debug Manager (RVDM), and be given R/W/X permissions.
92///
93/// - Locked regions are enforced & locked for both machine-mode and
94/// user-mode. This means that we can no longer use locked regions in
95/// combination with the machine-mode whitelist policy to take away access
/// permissions from user-mode. Instead, we need to place all user-mode
97/// regions as non-locked regions _in front of_ all locked machine-mode
98/// regions, and insert a "deny-all" non-locked fallback user-mode region in
99/// between to achieve our desired isolation properties.
100///
/// As a consequence, because of this "deny-all" user-mode region, we have one
/// fewer memory region available to be used as a userspace MPU.
103///
104/// Because all of this is much too complex to implement at runtime (and can't
105/// be reconfigured at runtime once MML is configured), we define a new trait
106/// [`EPMPDebugConfig`] with two implementations [`EPMPDebugEnable`] and
/// [`EPMPDebugDisable`]. The ePMP implementation is generic over these types
108/// and can, for instance, advertise a different number of MPU regions available
109/// for userspace. It further contains a method to retrieve the RVDM memory
110/// region's NAPOT address specification irrespective of whether the debug
111/// memory is enabled, and an associated constant to use in the configuration
112/// code (such that the branches not taken can be optimized out).
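///
/// As a rough sketch (using a hypothetical helper that is not part of this
/// module), code that is generic over `DBG: EPMPDebugConfig` can derive the
/// `pmpaddrX` entry range backing the user-mode TOR regions from these
/// associated constants:
///
/// ```rust,ignore
/// fn tor_user_entry_range<DBG: EPMPDebugConfig>() -> core::ops::Range<usize> {
///     // Each user-mode TOR "region" occupies two consecutive `pmpaddrX`
///     // entries, starting at `TOR_USER_ENTRIES_OFFSET`.
///     let start = DBG::TOR_USER_ENTRIES_OFFSET;
///     let end = start + 2 * DBG::TOR_USER_REGIONS;
///     start..end
/// }
/// ```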
113pub trait EPMPDebugConfig {
114 /// Whether the debug port shall be enabled or not.
115 const DEBUG_ENABLE: bool;
116
117 /// How many userspace MPU (TOR) regions are available under this
118 /// configuration.
119 const TOR_USER_REGIONS: usize;
120
121 /// The offset where the user-mode TOR PMP entries start. This counts
122 /// "entries", meaning `pmpaddrX` registers. A single "TOR region" uses two
123 /// consecutive "entries".
124 const TOR_USER_ENTRIES_OFFSET: usize;
125}
126
127pub enum EPMPDebugEnable {}
128impl EPMPDebugConfig for EPMPDebugEnable {
129 const DEBUG_ENABLE: bool = true;
130 const TOR_USER_REGIONS: usize = TOR_USER_REGIONS_DEBUG_ENABLE;
131 const TOR_USER_ENTRIES_OFFSET: usize = TOR_USER_ENTRIES_OFFSET_DEBUG_ENABLE;
132}
133
134pub enum EPMPDebugDisable {}
135impl EPMPDebugConfig for EPMPDebugDisable {
136 const DEBUG_ENABLE: bool = false;
137 const TOR_USER_REGIONS: usize = TOR_USER_REGIONS_DEBUG_DISABLE;
138 const TOR_USER_ENTRIES_OFFSET: usize = TOR_USER_ENTRIES_OFFSET_DEBUG_DISABLE;
139}
140
141/// EarlGrey ePMP Configuration Errors
142#[derive(Debug, Copy, Clone)]
143pub enum EarlGreyEPMPError {
144 /// The ePMP driver cannot be instantiated because of an unexpected
145 /// `mseccfg` register value.
146 InvalidInitialMseccfgValue,
147 /// The ePMP driver cannot be instantiated because of an unexpected `pmpcfg`
148 /// register value (where the `usize` value contains the index of the
149 /// `pmpcfg` register).
150 InvalidInitialPmpcfgValue(usize),
151 /// The ePMP registers do not match their expected values after
152 /// configuration. The system cannot be assumed to be in a secure state.
153 SanityCheckFail,
154}
155
156/// RISC-V ePMP memory protection implementation for the EarlGrey SoC.
157///
158/// The EarlGrey ePMP implementation hard-codes many assumptions about the
159/// behavior and state of the underlying hardware, to reduce complexity of this
160/// codebase, and improve its security, reliability and auditability.
161///
162/// Namely, it makes and checks assumptions about the machine security policy
163/// prior to its initialization routine, locks down the hardware through a
164/// static set of PMP configuration steps, and then exposes a subset of regions
165/// for user-mode protection through the `PMPUserMPU` trait.
166///
167/// The EarlGrey ePMP implementation supports JTAG debug-port access through the
/// integrated RISC-V Debug Manager (RVDM) core, which requires R/W/X-access to a
169/// given region of memory in machine-mode and user-mode. The [`EarlGreyEPMP`]
170/// struct accepts a generic [`EPMPDebugConfig`] implementation, which either
171/// enables (in the case of [`EPMPDebugEnable`]) or disables
172/// ([`EPMPDebugDisable`]) the debug-port access. However, enabling debug-port
173/// access can potentially weaken the system's security by not enabling
174/// machine-mode lockdown (MML), and uses an additional PMP region otherwise
175/// available to userspace. See the documentation of [`EPMPDebugConfig`] for
176/// more information on this.
177///
178/// ## ePMP Region Layout & Configuration (`EPMPDebugDisable` mode)
179///
180/// Because of the machine-mode lockdown (MML) mode, no region can have R/W/X
181/// permissions. The machine-mode whitelist policy (MMWP) further requires all
182/// memory accessed by machine-mode to have a corresponding locked PMP entry
/// defined. Lower-indexed PMP entries have precedence over entries with higher
184/// indices. Under MML mode, a non-locked (user-mode) entry prevents
185/// machine-mode access to that memory. Thus, the ePMP is to be configured in a
186/// "sandwiched" layout (with decreasing precedence):
187///
188/// 1. High-priority machine-mode "lockdown" entries.
189///
190/// These entries are only accessible to machine mode. Once locked, they can
/// only be changed through a hart reset. Examples of such memory sections
192/// can be the kernel's `.text` or certain RAM (e.g. stack) sections.
193///
194/// 2. Tock's user-mode "MPU"
195///
196/// This section defines entries corresponding to memory sections made
/// accessible to user-mode. These entries are exposed through the
198/// implementation of the `TORUserPMP` trait.
199///
200/// **Effectively, this is Tock's "MPU" sandwiched in between the
201/// high-priority and low-priority PMP sections.**
202///
/// These entries are not locked and must be turned off prior to the kernel
204/// being able to access them.
205///
/// This section must take precedence over the lower kernel-mode entries, as
/// its entries are aliased by the lower kernel-mode entries. Having a
/// locked machine-mode entry take precedence over an aliasing user-space
/// one would prevent user-mode from accessing the aliased memory.
210///
/// 3. Low-priority machine-mode "accessibility" entries.
///
/// These entries provide the kernel access to memory regions which are
/// (partially) aliased by user-mode regions above. This allows for
/// implementing memory sharing between userspace and the kernel (moving
/// access to user-mode by turning on a region above, and falling back onto
/// these rules when turning the user-mode region off).
218///
/// These regions can be broad (e.g. grant R/W on the entire RAM), but
/// should not provide any excess permissions where not required (e.g. avoid
/// granting R/X on flash-memory where only R is required, because the
/// kernel-text is already marked as R/X in the high-priority regions above).
223///
224/// Because the ROM_EXT and test ROM set up different ePMP configs, there are
225/// separate initialization routines (`new` and `new_test_rom`) for those
226/// environments.
227///
228/// `new` (only available when the debug-port is disabled) attempts to set up
229/// the following memory protection rules and layout:
230///
/// - `mseccfg` CSR:
232///
233/// ```text
234/// |-----+-----------------------------------------------------------+-------|
235/// | BIT | LABEL | STATE |
236/// |-----+-----------------------------------------------------------+-------|
237/// | 0 | Machine-Mode Lockdown (MML) | 1 |
238/// | 1 | Machine-Mode Whitelist Policy (MMWP) | 1 |
239/// | 2 | Rule-Lock Bypass (RLB) | 0 |
240/// |-----+-----------------------------------------------------------+-------|
241/// ```
242///
243/// - `pmpcfgX` / `pmpaddrX` CSRs:
244///
245/// ```text
246/// |-------+----------------------------------------+-----------+---+-------|
247/// | ENTRY | REGION / ADDR | MODE | L | PERMS |
248/// |-------+----------------------------------------+-----------+---+-------|
249/// | 0 | Locked by the ROM_EXT or unused | NAPOT/OFF | X | |
250/// | | | | | |
251/// | 1 | Locked by the ROM_EXT or unused | NAPOT/OFF | X | |
252/// | | | | | |
253/// | 2 | -------------------------------------- | OFF | X | ----- |
254/// | 3 | Kernel .text section | TOR | X | R/X |
255/// | | | | | |
256/// | 4 | / \ | OFF | | |
257/// | 5 | \ Userspace TOR region #0 / | TOR | | ????? |
258/// | | | | | |
259/// | 6 | / \ | OFF | | |
260/// | 7 | \ Userspace TOR region #1 / | TOR | | ????? |
261/// | | | | | |
262/// | 8 | / \ | OFF | | |
263/// | 9 | \ Userspace TOR region #2 / | TOR | | ????? |
264/// | | | | | |
265/// | 10 | / \ | OFF | | |
266/// | 11 | \ Userspace TOR region #3 / | TOR | | ????? |
267/// | | | | | |
268/// | 12 | FLASH (spanning kernel & apps) | NAPOT | X | R |
269/// | | | | | |
270/// | 13 | -------------------------------------- | OFF | X | ----- |
271/// | | | | | |
272/// | 14 | RAM (spanning kernel & apps) | NAPOT | X | R/W |
273/// | | | | | |
274/// | 15 | MMIO | NAPOT | X | R/W |
275/// |-------+----------------------------------------+-----------+---+-------|
276/// ```
277///
278/// `new_test_rom` (only available when the debug-port is disabled) attempts to
279/// set up the following memory protection rules and layout:
280///
/// - `mseccfg` CSR:
282///
283/// ```text
284/// |-----+-----------------------------------------------------------+-------|
285/// | BIT | LABEL | STATE |
286/// |-----+-----------------------------------------------------------+-------|
287/// | 0 | Machine-Mode Lockdown (MML) | 1 |
288/// | 1 | Machine-Mode Whitelist Policy (MMWP) | 1 |
289/// | 2 | Rule-Lock Bypass (RLB) | 0 |
290/// |-----+-----------------------------------------------------------+-------|
291/// ```
292///
293/// - `pmpcfgX` / `pmpaddrX` CSRs:
294///
295/// ```text
296/// |-------+---------------------------------------------+-------+---+-------|
297/// | ENTRY | REGION / ADDR | MODE | L | PERMS |
298/// |-------+---------------------------------------------+-------+---+-------|
299/// | 0 | ------------------------------------------- | OFF | X | ----- |
300/// | 1 | Kernel .text section | TOR | X | R/X |
301/// | | | | | |
302/// | 2 | ------------------------------------------- | OFF | X | |
303/// | | | | | |
304/// | 3 | ------------------------------------------- | OFF | X | |
305/// | | | | | |
306/// | 4 | / \ | OFF | | |
307/// | 5 | \ Userspace TOR region #0 / | TOR | | ????? |
308/// | | | | | |
309/// | 6 | / \ | OFF | | |
310/// | 7 | \ Userspace TOR region #1 / | TOR | | ????? |
311/// | | | | | |
312/// | 8 | / \ | OFF | | |
313/// | 9 | \ Userspace TOR region #2 / | TOR | | ????? |
314/// | | | | | |
315/// | 10 | / \ | OFF | | |
316/// | 11 | \ Userspace TOR region #3 / | TOR | | ????? |
317/// | | | | | |
318/// | 12 | ------------------------------------------- | OFF | X | ----- |
319/// | | | | | |
320/// | 13 | FLASH (spanning kernel & apps) | NAPOT | X | R |
321/// | | | | | |
322/// | 14 | RAM (spanning kernel & apps) | NAPOT | X | R/W |
323/// | | | | | |
324/// | 15 | MMIO | NAPOT | X | R/W |
325/// |-------+---------------------------------------------+-------+---+-------|
326/// ```
327///
328/// ## ePMP Region Layout & Configuration (`EPMPDebugEnable` mode)
329///
330/// When enabling the RISC-V Debug Manager (JTAG debug port), the ePMP must be
331/// configured differently. This is because the `RVDM` requires a memory section
332/// to be mapped with read-write-execute privileges, which is not possible under
/// the machine-mode lockdown (MML) mode. However, simply disabling MML in
/// the above policy would grant userspace access to kernel memory through
/// the locked PMP entries. We still need to define locked PMP entries to grant
336/// the kernel (machine-mode) access to its required memory regions, as the
337/// machine-mode whitelist policy (MMWP) is enabled.
338///
/// Thus, we split the PMP entries into three parts, as outlined in the
340/// following:
341///
342/// 1. Tock's user-mode "MPU"
343///
344/// This section defines entries corresponding to memory sections made
/// accessible to user-mode. These entries are exposed through the
346/// implementation of the `TORUserPMP` trait.
347///
/// These entries are not locked. Because the machine-mode lockdown (MML)
/// mode is not enabled, non-locked regions are ignored in machine-mode. The
/// kernel does not have to disable these entries prior to being able to
/// access them.
352///
/// This section must take precedence over the lower kernel-mode entries, as
/// its entries are aliased by the lower kernel-mode entries. Having a
/// locked machine-mode entry take precedence over an aliasing user-space
/// one would prevent user-mode from accessing the aliased memory.
357///
358/// 2. User-mode "deny-all" rule.
359///
/// Without machine-mode lockdown (MML) mode, locked regions apply to both
/// user- and kernel-mode. Because the machine-mode whitelist policy (MMWP)
/// is enabled, the kernel must be granted explicit permission to access
/// memory (default-deny policy) through such locked regions, which are then
/// also visible to user-mode. This means that we must prevent any user-mode
/// access from "falling through" to these kernel-mode regions. For this
365/// purpose, we insert a non-locked "deny-all" rule which disallows all
366/// user-mode accesses to the entire address space, if no other
367/// higher-priority user-mode rule matches.
368///
/// 3. Machine-mode "accessibility" entries.
///
/// These entries provide the kernel access to certain memory regions, as
372/// required by the machine-mode whitelist policy (MMWP).
373///
374/// `new_debug` (only available when the debug-port is enabled) attempts to set
375/// up the following memory protection rules and layout:
376///
/// - `mseccfg` CSR:
378///
379/// ```text
380/// |-----+-----------------------------------------------------------+-------|
381/// | BIT | LABEL | STATE |
382/// |-----+-----------------------------------------------------------+-------|
383/// | 0 | Machine-Mode Lockdown (MML) | 0 |
384/// | 1 | Machine-Mode Whitelist Policy (MMWP) | 1 |
385/// | 2 | Rule-Lock Bypass (RLB) | 0 |
386/// |-----+-----------------------------------------------------------+-------|
387/// ```
388///
389/// - `pmpcfgX` / `pmpaddrX` CSRs:
390///
391/// ```text
392/// |-------+---------------------------------------------+-------+---+-------|
393/// | ENTRY | REGION / ADDR | MODE | L | PERMS |
394/// |-------+---------------------------------------------+-------+---+-------|
395/// | 0 | / \ | OFF | | |
396/// | 1 | \ Userspace TOR region #0 / | TOR | | ????? |
397/// | | | | | |
398/// | 2 | / \ | OFF | | |
399/// | 3 | \ Userspace TOR region #1 / | TOR | | ????? |
400/// | | | | | |
401/// | 4 | / \ | OFF | | |
402/// | 5 | \ Userspace TOR region #2 / | TOR | | ????? |
403/// | | | | | |
404/// | 6 | / \ | OFF | | |
405/// | 7 | \ Userspace TOR region #3 / | TOR | | ????? |
406/// | | | | | |
407/// | 8 | ------------------------------------------- | OFF | | ----- |
408/// | | | | | |
409/// | 9 | "Deny-all" user-mode rule (all memory) | NAPOT | | ----- |
410/// | | | | | |
411/// | 10 | ------------------------------------------- | OFF | X | ----- |
412/// | 11 | Kernel .text section | TOR | X | R/X |
413/// | | | | | |
414/// | 12 | RVDM Debug Core Memory | NAPOT | X | R/W/X |
415/// | | | | | |
416/// | 13 | FLASH (spanning kernel & apps) | NAPOT | X | R |
417/// | | | | | |
418/// | 14 | RAM (spanning kernel & apps) | NAPOT | X | R/W |
419/// | | | | | |
420/// | 15 | MMIO | NAPOT | X | R/W |
421/// |-------+---------------------------------------------+-------+---+-------|
422/// ```
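///
/// ## Usage sketch
///
/// Depending on the boot environment and debug configuration, a board calls
/// one of the `new`, `new_test_rom`, or `new_debug` constructors below. The
/// following hypothetical, board-specific snippet only illustrates how the
/// wrapper types and generic parameters fit together; the `*_spec` values are
/// assumed to be valid [`NAPOTRegionSpec`] / [`TORRegionSpec`] instances
/// derived from the board's memory map:
///
/// ```rust,ignore
/// // SAFETY: the region specifications must accurately describe the SoC's
/// // memory map, as they are used to program the ePMP CSRs.
/// let epmp: EarlGreyEPMP<true, EPMPDebugDisable> = unsafe {
///     EarlGreyEPMP::new(
///         FlashRegion(flash_spec),
///         RAMRegion(ram_spec),
///         MMIORegion(mmio_spec),
///         KernelTextRegion(kernel_text_spec),
///     )
/// }
/// .unwrap();
/// ```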
423pub struct EarlGreyEPMP<const HANDOVER_CONFIG_CHECK: bool, DBG: EPMPDebugConfig> {
424 user_pmp_enabled: Cell<bool>,
425 // We can't use our generic parameter to determine the length of the
426 // TORUserPMPCFG array (missing `generic_const_exprs` feature). Thus we
427 // always assume that the debug-port is disabled and we can fit
428 // `TOR_USER_REGIONS_DEBUG_DISABLE` user-mode TOR regions.
429 shadow_user_pmpcfgs: [Cell<TORUserPMPCFG>; TOR_USER_REGIONS_DEBUG_DISABLE],
430 _pd: PhantomData<DBG>,
431}
432
433impl<const HANDOVER_CONFIG_CHECK: bool> EarlGreyEPMP<{ HANDOVER_CONFIG_CHECK }, EPMPDebugDisable> {
434 pub unsafe fn new(
435 flash: FlashRegion,
436 ram: RAMRegion,
437 mmio: MMIORegion,
438 kernel_text: KernelTextRegion,
439 ) -> Result<Self, EarlGreyEPMPError> {
440 use kernel::utilities::registers::interfaces::{Readable, Writeable};
441
442 // --> We start with the "high-priority" ("lockdown") section of the
443 // ePMP configuration:
444
445 // Provide R/X access to the kernel .text as passed to us above.
446 // Allocate a TOR region in PMP entries 2 and 3:
447 csr::CSR.pmpaddr2.set((kernel_text.0.start() as usize) >> 2);
448 csr::CSR.pmpaddr3.set((kernel_text.0.end() as usize) >> 2);
449
450 // Set the appropriate `pmpcfg0` register value:
451 //
        // 0x80 = 0b10000000, for the start address of the kernel .text TOR
        // entry, as well as for entries 0 and 1.
454 // setting L(7) = 1, A(4-3) = OFF, X(2) = 0, W(1) = 0, R(0) = 0
455 //
456 // 0x8d = 0b10001101, for kernel .text TOR region
457 // setting L(7) = 1, A(4-3) = TOR, X(2) = 1, W(1) = 0, R(0) = 1
458 //
459 // Note that we try to lock entries 0 and 1 into OFF mode. If the
460 // ROM_EXT set these up and locked them, this will do nothing, otherwise
461 // it will permanently disable these entries (preventing them from being
462 // misused later).
463 csr::CSR.pmpcfg0.set(0x8d_80_80_80);
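        // (Illustration only: the packed value above is simply the four
        // per-entry configuration octets assembled least-significant-byte
        // first, i.e. u32::from_le_bytes([0x80, 0x80, 0x80, 0x8d]) ==
        // 0x8d_80_80_80, with entry 0 occupying the lowest byte.)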
464
465 // --> Continue with the "low-priority" ("accessibility") section of the
466 // ePMP configuration:
467
468 // Configure a Read-Only NAPOT region for the entire flash (spanning
469 // kernel & apps, but overlayed by the R/X kernel text TOR section)
470 csr::CSR.pmpaddr12.set(flash.0.napot_addr());
471
472 // Configure a Read-Write NAPOT region for MMIO.
473 csr::CSR.pmpaddr14.set(mmio.0.napot_addr());
474
475 // Configure a Read-Write NAPOT region for the entire RAM (spanning
476 // kernel & apps)
477 csr::CSR.pmpaddr15.set(ram.0.napot_addr());
478
479 // With the FLASH, RAM and MMIO configured in separate regions, we can
480 // activate this new configuration, and further adjust the permissions
481 // of the (currently all-capable) last PMP entry `pmpaddr15` to be R/W,
482 // as required for MMIO:
483 //
484 // 0x99 = 0b10011001, for FLASH NAPOT region
485 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 0, R(0) = 1
486 //
487 // 0x80 = 0b10000000, for the unused region
488 // setting L(7) = 1, A(4-3) = OFF, X(2) = 0, W(1) = 0, R(0) = 0
489 //
490 // 0x9B = 0b10011011, for RAM & MMIO NAPOT regions
491 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 1, R(0) = 1
492 csr::CSR.pmpcfg3.set(0x9B_9B_80_99);
493
494 // Ensure that the other pmpcfgX CSRs are cleared:
495 csr::CSR.pmpcfg1.set(0x00000000);
496 csr::CSR.pmpcfg2.set(0x00000000);
497
498 // ---------- PMP machine CSRs configured, lock down the system
499
500 // Finally, enable machine-mode lockdown.
501 // Set RLB(2) = 0, MMWP(1) = 1, MML(0) = 1
502 csr::CSR.mseccfg.set(0x00000003);
503
504 // ---------- System locked down, cross-check config
505
506 // Now, cross-check that the CSRs have the expected values. This acts as
507 // a sanity check, and can also help to protect against some set of
508 // fault-injection attacks. These checks can't be optimized out by the
509 // compiler, as they invoke assembly underneath which is not marked as
510 // ["pure"](https://doc.rust-lang.org/reference/inline-assembly.html).
511 //
512 // Note that different ROM_EXT versions configure entries 0 and 1
513 // differently, so we only confirm they are locked here.
514 if csr::CSR.mseccfg.get() != 0x00000003
515 || (csr::CSR.pmpcfg0.get() & 0xFFFF8080) != 0x8d808080
516 || csr::CSR.pmpcfg1.get() != 0x00000000
517 || csr::CSR.pmpcfg2.get() != 0x00000000
518 || csr::CSR.pmpcfg3.get() != 0x9B9B8099
519 || csr::CSR.pmpaddr2.get() != (kernel_text.0.start() as usize) >> 2
520 || csr::CSR.pmpaddr3.get() != (kernel_text.0.end() as usize) >> 2
521 || csr::CSR.pmpaddr12.get() != flash.0.napot_addr()
522 || csr::CSR.pmpaddr14.get() != mmio.0.napot_addr()
523 || csr::CSR.pmpaddr15.get() != ram.0.napot_addr()
524 {
525 return Err(EarlGreyEPMPError::SanityCheckFail);
526 }
527
528 // The ePMP hardware was correctly configured, build the ePMP struct:
529 const DEFAULT_USER_PMPCFG_OCTET: Cell<TORUserPMPCFG> = Cell::new(TORUserPMPCFG::OFF);
530 Ok(EarlGreyEPMP {
531 user_pmp_enabled: Cell::new(false),
532 shadow_user_pmpcfgs: [DEFAULT_USER_PMPCFG_OCTET; TOR_USER_REGIONS_DEBUG_DISABLE],
533 _pd: PhantomData,
534 })
535 }
536
537 pub unsafe fn new_test_rom(
538 flash: FlashRegion,
539 ram: RAMRegion,
540 mmio: MMIORegion,
541 kernel_text: KernelTextRegion,
542 ) -> Result<Self, EarlGreyEPMPError> {
543 use kernel::utilities::registers::interfaces::{Readable, Writeable};
544
545 if HANDOVER_CONFIG_CHECK {
546 Self::check_initial_hardware_config()?;
547 } else {
548 // We aren't supposed to run a handover configuration check. This is
549 // useful for environments which don't replicate the OpenTitan
550 // EarlGrey chip behavior entirely accurately, such as
551 // QEMU. However, in those environments, we cannot guarantee that
552 // this configuration is actually going to work, and not break the
553 // system in the meantime.
554 //
555 // We perform a best-effort configuration, starting by setting rule-lock
556 // bypass...
557 csr::CSR.mseccfg.set(0x00000004);
            // ...adding our required kernel-mode memory access rule...
559 csr::CSR.pmpaddr15.set(0x7FFFFFFF);
560 csr::CSR.pmpcfg3.set(0x9F000000);
561 // ...and enabling the machine-mode whitelist policy:
562 csr::CSR.mseccfg.set(0x00000006);
563 }
564
565 // ---------- HW configured as expected, start setting PMP CSRs
566
567 // The below instructions are an intricate dance to achieve our desired
        // ePMP configuration. For correctness' sake, we -- at no intermediate
569 // point -- want to lose access to RAM, FLASH or MMIO.
570 //
571 // This is challenging, as the last section currently provides us access
572 // to all of these regions, and we can't atomically change both its
573 // pmpaddrX and pmpcfgX CSRs to limit it to a subset of its address
574 // range and permissions. Thus, before changing the `pmpcfg3` /
575 // `pmpaddr15` region, we first utilize another higher-priority CSR to
576 // provide us access to one of the memory regions we'd lose access to,
577 // namely we use the PMP entry 12 to provide us access to MMIO.
578
579 // --> We start with the "high-priority" ("lockdown") section of the
580 // ePMP configuration:
581
582 // Provide R/X access to the kernel .text as passed to us above.
583 // Allocate a TOR region in PMP entries 0 and 1:
584 csr::CSR.pmpaddr0.set((kernel_text.0.start() as usize) >> 2);
585 csr::CSR.pmpaddr1.set((kernel_text.0.end() as usize) >> 2);
586
587 // Set the appropriate `pmpcfg0` register value:
588 //
589 // 0x80 = 0b10000000, for start address of the kernel .text TOR entry
590 // and to disable regions 2 & 3 (to be compatible with the
591 // non-test-rom constructor).
592 // setting L(7) = 1, A(4-3) = OFF, X(2) = 0, W(1) = 0, R(0) = 0
593 //
594 // 0x8d = 0b10001101, for kernel .text TOR region
595 // setting L(7) = 1, A(4-3) = TOR, X(2) = 1, W(1) = 0, R(0) = 1
596 csr::CSR.pmpcfg0.set(0x80808d80);
597
        // --> Continue with the "low-priority" ("accessibility") section of the
599 // ePMP configuration:
600
601 // Now, onto `pmpcfg3`. As discussed above, we want to use a temporary
602 // region to retain MMIO access while reconfiguring the `pmpcfg3` /
603 // `pmpaddr15` register. Thus, write the MMIO region access into
604 // `pmpaddr12`:
605 csr::CSR.pmpaddr12.set(mmio.0.napot_addr());
606
607 // Configure a Read-Only NAPOT region for the entire flash (spanning
608 // kernel & apps, but overlayed by the R/X kernel text TOR section)
609 csr::CSR.pmpaddr13.set(flash.0.napot_addr());
610
611 // Configure a Read-Write NAPOT region for the entire RAM (spanning
612 // kernel & apps)
613 csr::CSR.pmpaddr14.set(ram.0.napot_addr());
614
615 // With the FLASH, RAM and MMIO configured in separate regions, we can
616 // activate this new configuration, and further adjust the permissions
617 // of the (currently all-capable) last PMP entry `pmpaddr15` to be R/W,
618 // as required for MMIO:
619 //
620 // 0x99 = 0b10011001, for FLASH NAPOT region
621 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 0, R(0) = 1
622 //
623 // 0x9B = 0b10011011, for RAM & MMIO NAPOT regions
624 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 1, R(0) = 1
625 csr::CSR.pmpcfg3.set(0x9B9B999B);
626
627 // With the new configuration in place, we can adjust the last region's
628 // address to be limited to the MMIO region, ...
629 csr::CSR.pmpaddr15.set(mmio.0.napot_addr());
630
631 // ...and then deactivate the `pmpaddr12` fallback MMIO region
632 //
633 // Remove the temporary MMIO region permissions from `pmpaddr12`:
634 //
635 // 0x80 = 0b10000000
636 // setting L(7) = 1, A(4-3) = OFF, X(2) = 0, W(1) = 0, R(0) = 0
637 //
638 // 0x99 = 0b10011001, for FLASH NAPOT region
639 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 0, R(0) = 1
640 //
641 // 0x9B = 0b10011011, for RAM & MMIO NAPOT regions
642 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 1, R(0) = 1
643 csr::CSR.pmpcfg3.set(0x9B9B9980);
644
645 // Ensure that the other pmpcfgX CSRs are cleared:
646 csr::CSR.pmpcfg1.set(0x00000000);
647 csr::CSR.pmpcfg2.set(0x00000000);
648
649 // ---------- PMP machine CSRs configured, lock down the system
650
651 // Finally, unset the rule-lock bypass (RLB) bit. If we don't have a
652 // debug memory region provided, further set machine-mode lockdown (we
653 // can't enable MML and also have a R/W/X region). We also set MMWP for
654 // good measure, but that shouldn't make a difference -- it can't be
655 // cleared anyways as it is a sticky bit.
656 //
657 // Unsetting RLB with at least one locked region will mean that we can't
658 // set it again, thus actually enforcing the region lock bits.
659 //
660 // Set RLB(2) = 0, MMWP(1) = 1, MML(0) = 1
661 csr::CSR.mseccfg.set(0x00000003);
662
663 // ---------- System locked down, cross-check config
664
665 // Now, cross-check that the CSRs have the expected values. This acts as
666 // a sanity check, and can also help to protect against some set of
667 // fault-injection attacks. These checks can't be optimized out by the
668 // compiler, as they invoke assembly underneath which is not marked as
669 // ["pure"](https://doc.rust-lang.org/reference/inline-assembly.html).
670 if csr::CSR.mseccfg.get() != 0x00000003
671 || csr::CSR.pmpcfg0.get() != 0x00008d80
672 || csr::CSR.pmpcfg1.get() != 0x00000000
673 || csr::CSR.pmpcfg2.get() != 0x00000000
674 || csr::CSR.pmpcfg3.get() != 0x9B9B9980
675 || csr::CSR.pmpaddr0.get() != (kernel_text.0.start() as usize) >> 2
676 || csr::CSR.pmpaddr1.get() != (kernel_text.0.end() as usize) >> 2
677 || csr::CSR.pmpaddr13.get() != flash.0.napot_addr()
678 || csr::CSR.pmpaddr14.get() != ram.0.napot_addr()
679 || csr::CSR.pmpaddr15.get() != mmio.0.napot_addr()
680 {
681 return Err(EarlGreyEPMPError::SanityCheckFail);
682 }
683
684 // The ePMP hardware was correctly configured, build the ePMP struct:
685 const DEFAULT_USER_PMPCFG_OCTET: Cell<TORUserPMPCFG> = Cell::new(TORUserPMPCFG::OFF);
686 Ok(EarlGreyEPMP {
687 user_pmp_enabled: Cell::new(false),
688 shadow_user_pmpcfgs: [DEFAULT_USER_PMPCFG_OCTET; TOR_USER_REGIONS_DEBUG_DISABLE],
689 _pd: PhantomData,
690 })
691 }
692}
693
694impl<const HANDOVER_CONFIG_CHECK: bool> EarlGreyEPMP<{ HANDOVER_CONFIG_CHECK }, EPMPDebugEnable> {
695 pub unsafe fn new_debug(
696 flash: FlashRegion,
697 ram: RAMRegion,
698 mmio: MMIORegion,
699 kernel_text: KernelTextRegion,
700 debug_memory: RVDMRegion,
701 ) -> Result<Self, EarlGreyEPMPError> {
702 use kernel::utilities::registers::interfaces::{Readable, Writeable};
703
704 if HANDOVER_CONFIG_CHECK {
705 Self::check_initial_hardware_config()?;
706 } else {
707 // We aren't supposed to run a handover configuration check. This is
708 // useful for environments which don't replicate the OpenTitan
709 // EarlGrey chip behavior entirely accurately, such as
710 // QEMU. However, in those environments, we cannot guarantee that
711 // this configuration is actually going to work, and not break the
712 // system in the meantime.
713 //
714 // We perform a best-effort configuration, starting by setting rule-lock
715 // bypass...
716 csr::CSR.mseccfg.set(0x00000004);
            // ...adding our required kernel-mode memory access rule...
718 csr::CSR.pmpaddr15.set(0x7FFFFFFF);
719 csr::CSR.pmpcfg3.set(0x9F000000);
720 // ...and enabling the machine-mode whitelist policy:
721 csr::CSR.mseccfg.set(0x00000006);
722 }
723
724 // ---------- HW configured as expected, start setting PMP CSRs
725
726 // The below instructions are an intricate dance to achieve our desired
        // ePMP configuration. For correctness' sake, we -- at no intermediate
728 // point -- want to lose access to RAM, FLASH or MMIO.
729 //
730 // This is challenging, as the last section currently provides us access
731 // to all of these regions, and we can't atomically change both its
732 // pmpaddrX and pmpcfgX CSRs to limit it to a subset of its address
733 // range and permissions. Thus, before changing the `pmpcfg3` /
734 // `pmpaddr15` region, we first utilize another higher-priority CSR to
735 // provide us access to one of the memory regions we'd lose access to,
736 // namely we use the PMP entry 12 to provide us access to MMIO.
737
738 // Provide R/X access to the kernel .text as passed to us above.
739 // Allocate a TOR region in PMP entries 10 and 11:
740 csr::CSR
741 .pmpaddr10
742 .set((kernel_text.0.start() as usize) >> 2);
743 csr::CSR.pmpaddr11.set((kernel_text.0.end() as usize) >> 2);
744
745 // Set the appropriate `pmpcfg2` register value:
746 //
747 // 0x80 = 0b10000000, for start address of the kernel .text TOR entry
748 // setting L(7) = 1, A(4-3) = OFF, X(2) = 0, W(1) = 0, R(0) = 0
749 //
750 // 0x8d = 0b10001101, for kernel .text TOR region
751 // setting L(7) = 1, A(4-3) = TOR, X(2) = 1, W(1) = 0, R(0) = 1
752 csr::CSR.pmpcfg2.set(0x8d800000);
753
754 // Now, onto `pmpcfg3`. As discussed above, we want to use a temporary
755 // region to retain MMIO access while reconfiguring the `pmpcfg3` /
756 // `pmpaddr15` register. Thus, write the MMIO region access into
757 // `pmpaddr12`:
758 csr::CSR.pmpaddr12.set(mmio.0.napot_addr());
759
760 // Configure a Read-Only NAPOT region for the entire flash (spanning
761 // kernel & apps, but overlayed by the R/X kernel text TOR section)
762 csr::CSR.pmpaddr13.set(flash.0.napot_addr());
763
764 // Configure a Read-Write NAPOT region for the entire RAM (spanning
765 // kernel & apps)
766 csr::CSR.pmpaddr14.set(ram.0.napot_addr());
767
768 // With the FLASH, RAM and MMIO configured in separate regions, we can
769 // activate this new configuration, and further adjust the permissions
770 // of the (currently all-capable) last PMP entry `pmpaddr15` to be R/W,
771 // as required for MMIO:
772 //
773 // 0x99 = 0b10011001, for FLASH NAPOT region
774 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 0, R(0) = 1
775 //
776 // 0x9B = 0b10011011, for RAM & MMIO NAPOT regions
777 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 1, R(0) = 1
778 csr::CSR.pmpcfg3.set(0x9B9B999B);
779
780 // With the new configuration in place, we can adjust the last region's
781 // address to be limited to the MMIO region, ...
782 csr::CSR.pmpaddr15.set(mmio.0.napot_addr());
783
784 // ...and then repurpose `pmpaddr12` for the debug port:
785 csr::CSR.pmpaddr12.set(debug_memory.0.napot_addr());
786
787 // 0x9F = 0b10011111, for RVDM R/W/X memory region
788 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 1, W(1) = 1, R(0) = 1
789 //
790 // 0x99 = 0b10011001, for FLASH NAPOT region
791 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 0, R(0) = 1
792 //
793 // 0x9B = 0b10011011, for RAM & MMIO NAPOT regions
794 // setting L(7) = 1, A(4-3) = NAPOT, X(2) = 0, W(1) = 1, R(0) = 1
795 csr::CSR.pmpcfg3.set(0x9B9B999F);
796
797 // Ensure that the other pmpcfgX CSRs are cleared:
798 csr::CSR.pmpcfg0.set(0x00000000);
799 csr::CSR.pmpcfg1.set(0x00000000);
800
801 // ---------- PMP machine CSRs configured, lock down the system
802
803 // Finally, unset the rule-lock bypass (RLB) bit. If we don't have a
804 // debug memory region provided, further set machine-mode lockdown (we
805 // can't enable MML and also have a R/W/X region). We also set MMWP for
806 // good measure, but that shouldn't make a difference -- it can't be
807 // cleared anyways as it is a sticky bit.
808 //
809 // Unsetting RLB with at least one locked region will mean that we can't
810 // set it again, thus actually enforcing the region lock bits.
811 //
812 // Set RLB(2) = 0, MMWP(1) = 1, MML(0) = 0
813 csr::CSR.mseccfg.set(0x00000002);
814
815 // ---------- System locked down, cross-check config
816
817 // Now, cross-check that the CSRs have the expected values. This acts as
818 // a sanity check, and can also help to protect against some set of
819 // fault-injection attacks. These checks can't be optimized out by the
820 // compiler, as they invoke assembly underneath which is not marked as
821 // ["pure"](https://doc.rust-lang.org/reference/inline-assembly.html).
822 if csr::CSR.mseccfg.get() != 0x00000002
823 || csr::CSR.pmpcfg0.get() != 0x00000000
824 || csr::CSR.pmpcfg1.get() != 0x00000000
825 || csr::CSR.pmpcfg2.get() != 0x8d800000
826 || csr::CSR.pmpcfg3.get() != 0x9B9B999F
827 || csr::CSR.pmpaddr10.get() != (kernel_text.0.start() as usize) >> 2
828 || csr::CSR.pmpaddr11.get() != (kernel_text.0.end() as usize) >> 2
829 || csr::CSR.pmpaddr12.get() != debug_memory.0.napot_addr()
830 || csr::CSR.pmpaddr13.get() != flash.0.napot_addr()
831 || csr::CSR.pmpaddr14.get() != ram.0.napot_addr()
832 || csr::CSR.pmpaddr15.get() != mmio.0.napot_addr()
833 {
834 return Err(EarlGreyEPMPError::SanityCheckFail);
835 }
836
837 // Now, as we're not in the machine-mode lockdown (MML) mode, locked PMP
838 // regions will still be accessible to userspace. To prevent our
839 // kernel-mode access regions from being accessible to user-mode, we use
840 // the last user-mode TOR region (`pmpaddr9`) to configure a
841 // "protection" region which disallows access to all memory that has not
842 // otherwise been granted access to.
843 csr::CSR.pmpaddr9.set(0x7FFFFFFF); // the entire address space
844
845 // And finally apply this configuration to the `pmpcfg2` CSR. For good
846 // measure, we also include the locked regions (which we can no longer
847 // modify thanks to RLB = 0).
848 //
849 // 0x18 = 0b00011000, to revoke user-mode perms to all memory
850 // setting L(7) = 0, A(4-3) = NAPOT, X(2) = 0, W(1) = 0, R(0) = 0
851 //
852 // 0x80 = 0b10000000, for start address of the kernel .text TOR entry
853 // setting L(7) = 1, A(4-3) = OFF, X(2) = 0, W(1) = 0, R(0) = 0
854 //
855 // 0x8d = 0b10001101, for kernel .text TOR region
856 // setting L(7) = 1, A(4-3) = TOR, X(2) = 1, W(1) = 0, R(0) = 1
        csr::CSR.pmpcfg2.set(0x8d801800);
858
859 // The ePMP hardware was correctly configured, build the ePMP struct:
860 const DEFAULT_USER_PMPCFG_OCTET: Cell<TORUserPMPCFG> = Cell::new(TORUserPMPCFG::OFF);
861 let epmp = EarlGreyEPMP {
862 user_pmp_enabled: Cell::new(false),
863 shadow_user_pmpcfgs: [DEFAULT_USER_PMPCFG_OCTET; TOR_USER_REGIONS_DEBUG_DISABLE],
864 _pd: PhantomData,
865 };
866
867 Ok(epmp)
868 }
869}
870
871impl<const HANDOVER_CONFIG_CHECK: bool, DBG: EPMPDebugConfig>
872 EarlGreyEPMP<{ HANDOVER_CONFIG_CHECK }, DBG>
873{
874 fn check_initial_hardware_config() -> Result<(), EarlGreyEPMPError> {
875 use kernel::utilities::registers::interfaces::Readable;
876
877 // This initialization code is written to work with 16 PMP entries. Add
878 // an explicit assertion such that things break when the constant above
879 // is changed:
880 #[allow(clippy::assertions_on_constants)]
881 const _: () = assert!(
882 PMP_ENTRIES_OVER_TWO == 8,
883 "EarlGrey ePMP initialization is written for 16 PMP entries.",
884 );
885
886 // ---------- Check current HW config
887
888 // Ensure that the `mseccfg` CSR has the expected value, namely that
889 // we're in "machine-mode whitelist policy" and have "rule-lock bypass"
890 // enabled. If this register has an unexpected value, we risk
891 // accidentally revoking important permissions for the Tock kernel
892 // itself.
893 if csr::CSR.mseccfg.get() != 0x00000006 {
894 return Err(EarlGreyEPMPError::InvalidInitialMseccfgValue);
895 }
896
897 // We assume the very last PMP region is set to provide us RXW access to
898 // the entirety of memory, and all other regions are disabled. Check the
899 // CSRs to make sure that this is indeed the case.
900 for i in 0..(PMP_ENTRIES_OVER_TWO / 2 - 1) {
901 // 0x98 = 0b10011000, extracting L(7) and A(4-3) bits.
902 if csr::CSR.pmpconfig_get(i) & 0x98989898 != 0x00000000 {
903 return Err(EarlGreyEPMPError::InvalidInitialPmpcfgValue(i));
904 }
905 }
906
907 // The last CSR is special, as we expect it to contain the NAPOT region
908 // which currently gives us memory access.
909 //
910 // 0x98 = 0b10011000, extracting L(7) and A(4-3) bits.
        // 0x9F = 0b10011111, extracting L(7), A(4-3), X(2), W(1), R(0) bits.
912 if csr::CSR.pmpconfig_get(PMP_ENTRIES_OVER_TWO / 2 - 1) & 0x9F989898 != 0x9F000000 {
913 return Err(EarlGreyEPMPError::InvalidInitialPmpcfgValue(
914 PMP_ENTRIES_OVER_TWO / 2 - 1,
915 ));
916 }
917
918 Ok(())
919 }
920
921 // ---------- Backing functions for the TORUserPMP implementations ---------
922 //
923 // The EarlGrey ePMP implementations of `TORUserPMP` differ between
924 // `EPMPDebugEnable` and `EPMPDebugDisable` configurations. These backing
925 // functions here are applicable to both, and called by those trait
926 // implementations respectively:
927
928 fn user_available_regions<const TOR_USER_REGIONS: usize>(&self) -> usize {
        // Always assume we have `TOR_USER_REGIONS` usable TOR regions. We have a
930 // fixed number of kernel memory protection regions, and a fixed mapping
931 // of user regions to hardware PMP entries.
932 TOR_USER_REGIONS
933 }
934
935 fn user_configure_pmp<const TOR_USER_REGIONS: usize>(
936 &self,
937 regions: &[(TORUserPMPCFG, *const u8, *const u8); TOR_USER_REGIONS],
938 ) -> Result<(), ()> {
939 // Configure all of the regions' addresses and store their pmpcfg octets
940 // in our shadow storage. If the user PMP is already enabled, we further
941 // apply this configuration (set the pmpcfgX CSRs) by running
942 // `enable_user_pmp`:
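        //
        // (Illustration only: a `regions` entry such as
        // `(mpu::Permissions::ReadWriteOnly.into(), start_ptr, end_ptr)`,
        // with hypothetical `start_ptr`/`end_ptr` pointers, requests a
        // user-mode read/write TOR region over that address range, while
        // entries set to `TORUserPMPCFG::OFF` remain disabled.)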
943 for (i, (region, shadow_user_pmpcfg)) in regions
944 .iter()
945 .zip(self.shadow_user_pmpcfgs.iter())
946 .enumerate()
947 {
948 // The ePMP in MML mode does not support read-write-execute
949 // regions. If such a region is to be configured, abort. As this
950 // loop here only modifies the shadow state, we can simply abort and
951 // return an error. We don't make any promises about the ePMP state
            // if the configuration fails, but it is still being activated with
953 // `enable_user_pmp`:
954 if region.0.get()
955 == <TORUserPMPCFG as From<mpu::Permissions>>::from(
956 mpu::Permissions::ReadWriteExecute,
957 )
958 .get()
959 {
960 return Err(());
961 }
962
            // Set the CSR addresses for this region (if it's not OFF, in which
964 // case the hardware-configured addresses are irrelevant):
965 if region.0 != TORUserPMPCFG::OFF {
966 csr::CSR.pmpaddr_set(
967 DBG::TOR_USER_ENTRIES_OFFSET + (i * 2) + 0,
968 (region.1 as usize).overflowing_shr(2).0,
969 );
970 csr::CSR.pmpaddr_set(
971 DBG::TOR_USER_ENTRIES_OFFSET + (i * 2) + 1,
972 (region.2 as usize).overflowing_shr(2).0,
973 );
974 }
975
976 // Store the region's pmpcfg octet:
977 shadow_user_pmpcfg.set(region.0);
978 }
979
980 // If the PMP is currently active, apply the changes to the CSRs:
981 if self.user_pmp_enabled.get() {
982 self.user_enable_user_pmp()?;
983 }
984
985 Ok(())
986 }
987
988 fn user_enable_user_pmp(&self) -> Result<(), ()> {
989 // Currently, this code requires the TOR regions to start at an even PMP
990 // region index. Assert that this is indeed the case:
991 #[allow(clippy::let_unit_value)]
992 let _: () = assert!(DBG::TOR_USER_ENTRIES_OFFSET % 2 == 0);
993
994 // We store the "enabled" PMPCFG octets of user regions in the
995 // `shadow_user_pmpcfg` field, such that we can re-enable the PMP
996 // without a call to `configure_pmp` (where the `TORUserPMPCFG`s are
997 // provided by the caller).
998
999 // Could use `iter_array_chunks` once that's stable.
1000 //
1001 // Limit iteration to `DBG::TOR_USER_REGIONS` to avoid overwriting any
1002 // configured debug regions in the last user-mode TOR region.
1003 let mut shadow_user_pmpcfgs_iter = self.shadow_user_pmpcfgs[..DBG::TOR_USER_REGIONS].iter();
1004 let mut i = DBG::TOR_USER_ENTRIES_OFFSET / 2;
1005
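        // (For illustration, assuming `TOR_USER_ENTRIES_OFFSET == 4`: user
        // region 0 occupies PMP entries 4 & 5, i.e. octets 0 & 1 of `pmpcfg1`,
        // region 1 occupies entries 6 & 7, i.e. octets 2 & 3 of `pmpcfg1`, and
        // so on.)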
1006 while let Some(first_region_pmpcfg) = shadow_user_pmpcfgs_iter.next() {
1007 // If we're at a "region" offset divisible by two (where "region" =
1008 // 2 PMP "entries"), then we can configure an entire `pmpcfgX` CSR
1009 // in one operation. As CSR writes are expensive, this is an
            // optimization worth making:
1011 let second_region_opt = if i % 2 == 0 {
1012 shadow_user_pmpcfgs_iter.next()
1013 } else {
1014 None
1015 };
1016
1017 if let Some(second_region_pmpcfg) = second_region_opt {
1018 // We're at an even index and have two regions to configure, so
1019 // do that with a single CSR write:
1020 csr::CSR.pmpconfig_set(
1021 i / 2,
1022 u32::from_be_bytes([
1023 second_region_pmpcfg.get().get(),
1024 TORUserPMPCFG::OFF.get(),
1025 first_region_pmpcfg.get().get(),
1026 TORUserPMPCFG::OFF.get(),
1027 ]) as usize,
1028 );
1029
1030 i += 2;
1031 } else if i % 2 == 0 {
1032 // This is a single region at an even index. Thus, modify the
1033 // first two pmpcfgX octets for this region.
1034 csr::CSR.pmpconfig_modify(
1035 i / 2,
1036 FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
1037 0x0000FFFF,
1038 0, // lower two octets
1039 u32::from_be_bytes([
1040 0,
1041 0,
1042 first_region_pmpcfg.get().get(),
1043 TORUserPMPCFG::OFF.get(),
1044 ]) as usize,
1045 ),
1046 );
1047
1048 i += 1;
1049 } else {
1050 // This is a single region at an odd index. Thus, modify the
1051 // latter two pmpcfgX octets for this region.
1052 csr::CSR.pmpconfig_modify(
1053 i / 2,
1054 FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
1055 0x0000FFFF,
1056 16, // higher two octets
1057 u32::from_be_bytes([
1058 0,
1059 0,
1060 first_region_pmpcfg.get().get(),
1061 TORUserPMPCFG::OFF.get(),
1062 ]) as usize,
1063 ),
1064 );
1065
1066 i += 1;
1067 }
1068 }
1069
1070 self.user_pmp_enabled.set(true);
1071
1072 Ok(())
1073 }
1074
1075 fn user_disable_user_pmp(&self) {
1076 // Simply set all of the user-region pmpcfg octets to OFF:
1077 let mut user_region_pmpcfg_octet_pairs = (DBG::TOR_USER_ENTRIES_OFFSET / 2)
1078 ..((DBG::TOR_USER_ENTRIES_OFFSET / 2) + DBG::TOR_USER_REGIONS);
1079
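        // (For illustration, in the `EPMPDebugDisable` configuration this
        // range is `2..6`, i.e. the lower and upper octet-pairs of `pmpcfg1`
        // and `pmpcfg2`.)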
1080 while let Some(first_region_idx) = user_region_pmpcfg_octet_pairs.next() {
1081 let second_region_opt = if first_region_idx % 2 == 0 {
1082 user_region_pmpcfg_octet_pairs.next()
1083 } else {
1084 None
1085 };
1086
1087 if let Some(_second_region_idx) = second_region_opt {
1088 // We're at an even index and have two regions to configure, so
1089 // do that with a single CSR write:
1090 csr::CSR.pmpconfig_set(
1091 first_region_idx / 2,
1092 u32::from_be_bytes([
1093 TORUserPMPCFG::OFF.get(),
1094 TORUserPMPCFG::OFF.get(),
1095 TORUserPMPCFG::OFF.get(),
1096 TORUserPMPCFG::OFF.get(),
1097 ]) as usize,
1098 );
1099 } else if first_region_idx % 2 == 0 {
1100 // This is a single region at an even index. Thus, modify the
1101 // first two pmpcfgX octets for this region.
1102 csr::CSR.pmpconfig_modify(
1103 first_region_idx / 2,
1104 FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
1105 0x0000FFFF,
1106 0, // lower two octets
1107 u32::from_be_bytes([
1108 0,
1109 0,
1110 TORUserPMPCFG::OFF.get(),
1111 TORUserPMPCFG::OFF.get(),
1112 ]) as usize,
1113 ),
1114 );
1115 } else {
1116 // This is a single region at an odd index. Thus, modify the
1117 // latter two pmpcfgX octets for this region.
1118 csr::CSR.pmpconfig_modify(
1119 first_region_idx / 2,
1120 FieldValue::<usize, csr::pmpconfig::pmpcfg::Register>::new(
1121 0x0000FFFF,
1122 16, // higher two octets
1123 u32::from_be_bytes([
1124 0,
1125 0,
1126 TORUserPMPCFG::OFF.get(),
1127 TORUserPMPCFG::OFF.get(),
1128 ]) as usize,
1129 ),
1130 );
1131 }
1132 }
1133
1134 self.user_pmp_enabled.set(false);
1135 }
1136}
1137
1138impl<const HANDOVER_CONFIG_CHECK: bool> TORUserPMP<{ TOR_USER_REGIONS_DEBUG_ENABLE }>
1139 for EarlGreyEPMP<{ HANDOVER_CONFIG_CHECK }, EPMPDebugEnable>
1140{
1141 // Don't require any const-assertions in the EarlGreyEPMP.
1142 const CONST_ASSERT_CHECK: () = ();
1143
1144 fn available_regions(&self) -> usize {
1145 self.user_available_regions::<TOR_USER_REGIONS_DEBUG_ENABLE>()
1146 }
1147
1148 fn configure_pmp(
1149 &self,
1150 regions: &[(TORUserPMPCFG, *const u8, *const u8); TOR_USER_REGIONS_DEBUG_ENABLE],
1151 ) -> Result<(), ()> {
1152 self.user_configure_pmp::<TOR_USER_REGIONS_DEBUG_ENABLE>(regions)
1153 }
1154
1155 fn enable_user_pmp(&self) -> Result<(), ()> {
1156 self.user_enable_user_pmp()
1157 }
1158
1159 fn disable_user_pmp(&self) {
1160 // Technically, the `disable_user_pmp` can be implemented as a no-op in
1161 // the debug-mode ePMP, as machine-mode lockdown (MML) is not enabled.
        // However, we still exercise these routines to stay as close to the
1163 // non-debug ePMP configuration as possible:
1164 self.user_disable_user_pmp()
1165 }
1166}
1167
1168impl<const HANDOVER_CONFIG_CHECK: bool> TORUserPMP<{ TOR_USER_REGIONS_DEBUG_DISABLE }>
1169 for EarlGreyEPMP<{ HANDOVER_CONFIG_CHECK }, EPMPDebugDisable>
1170{
1171 // Don't require any const-assertions in the EarlGreyEPMP.
1172 const CONST_ASSERT_CHECK: () = ();
1173
1174 fn available_regions(&self) -> usize {
1175 self.user_available_regions::<TOR_USER_REGIONS_DEBUG_DISABLE>()
1176 }
1177
1178 fn configure_pmp(
1179 &self,
1180 regions: &[(TORUserPMPCFG, *const u8, *const u8); TOR_USER_REGIONS_DEBUG_DISABLE],
1181 ) -> Result<(), ()> {
1182 self.user_configure_pmp::<TOR_USER_REGIONS_DEBUG_DISABLE>(regions)
1183 }
1184
1185 fn enable_user_pmp(&self) -> Result<(), ()> {
1186 self.user_enable_user_pmp()
1187 }
1188
1189 fn disable_user_pmp(&self) {
1190 self.user_disable_user_pmp()
1191 }
1192}
1193
1194impl<const HANDOVER_CONFIG_CHECK: bool, DBG: EPMPDebugConfig> fmt::Display
1195 for EarlGreyEPMP<{ HANDOVER_CONFIG_CHECK }, DBG>
1196{
1197 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
1198 use kernel::utilities::registers::interfaces::Readable;
1199
1200 write!(f, " EarlGrey ePMP configuration:\r\n")?;
1201 write!(
1202 f,
1203 " mseccfg: {:#08X}, user-mode PMP active: {:?}\r\n",
1204 csr::CSR.mseccfg.get(),
1205 self.user_pmp_enabled.get()
1206 )?;
1207 unsafe { format_pmp_entries::<PMP_ENTRIES>(f) }?;
1208
1209 write!(f, " Shadow PMP entries for user-mode:\r\n")?;
1210 for (i, shadowed_pmpcfg) in self.shadow_user_pmpcfgs[..DBG::TOR_USER_REGIONS]
1211 .iter()
1212 .enumerate()
1213 {
1214 let (start_pmpaddr_label, startaddr_pmpaddr, endaddr, mode) =
1215 if shadowed_pmpcfg.get() == TORUserPMPCFG::OFF {
1216 (
1217 "pmpaddr",
1218 csr::CSR.pmpaddr_get(DBG::TOR_USER_ENTRIES_OFFSET + (i * 2)),
1219 0,
1220 "OFF",
1221 )
1222 } else {
1223 (
1224 " start",
1225 csr::CSR
1226 .pmpaddr_get(DBG::TOR_USER_ENTRIES_OFFSET + (i * 2))
1227 .overflowing_shl(2)
1228 .0,
1229 csr::CSR
1230 .pmpaddr_get(DBG::TOR_USER_ENTRIES_OFFSET + (i * 2) + 1)
1231 .overflowing_shl(2)
1232 .0
1233 | 0b11,
1234 "TOR",
1235 )
1236 };
1237
1238 write!(
1239 f,
1240 " [{:02}]: {}={:#010X}, end={:#010X}, cfg={:#04X} ({}) ({}{}{}{})\r\n",
1241 DBG::TOR_USER_ENTRIES_OFFSET + (i * 2) + 1,
1242 start_pmpaddr_label,
1243 startaddr_pmpaddr,
1244 endaddr,
1245 shadowed_pmpcfg.get().get(),
1246 mode,
1247 if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::l) {
1248 "l"
1249 } else {
1250 "-"
1251 },
1252 if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::r) {
1253 "r"
1254 } else {
1255 "-"
1256 },
1257 if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::w) {
1258 "w"
1259 } else {
1260 "-"
1261 },
1262 if shadowed_pmpcfg.get().get_reg().is_set(pmpcfg_octet::x) {
1263 "x"
1264 } else {
1265 "-"
1266 },
1267 )?;
1268 }
1269
1270 Ok(())
1271 }
1272}