nano33ble/test/log_test.rs

// Licensed under the Apache License, Version 2.0 or the MIT License.
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright Tock Contributors 2022.

//! Tests the log storage interface in circular mode. For testing in linear mode, see
//! linear_log_test.rs.
//!
//! This testing framework creates a circular log storage interface in flash and runs a series of
//! operations upon it. The tests check to make sure that the correct values are read and written
//! after each operation, that errors are properly detected and handled, and that the log generally
//! behaves as expected. The tests perform both valid and invalid operations to fully test the log's
//! behavior.
//!
//! Shorting the `P1.08` pin on the nano33ble at any time during the test will erase the log and
//! reset the test state. Pressing the `RESET` button will reboot the nano33ble without erasing the
//! log, allowing for testing logs across reboots.
//!
//! In order to fully test the log, the tester should try a variety of erases and reboots to ensure
//! that the log works correctly across these operations. The tester can also modify the testing
//! operations and parameters defined below to test logs in different configurations. Different
//! configurations should be tested in order to exercise the log under a greater number of
//! scenarios (e.g. saturating/not saturating log pages with data, always/not always ending
//! operations at page boundaries, etc.).
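//!
//! As one hypothetical variation (not part of the default test), the entry payload length and the
//! number of entries written per operation can be changed via the constants defined below:
//! ```
//! // Hypothetical values; the defaults below are 8 and 120.
//! const BUFFER_LEN: usize = 4;
//! const ENTRIES_PER_WRITE: u64 = 60;
//! ```
//! Such changes alter how entries pack into flash pages and how quickly the circular log wraps.
//! Note that `BUFFER_LEN` must not exceed the 8-byte `BUFFER` defined below.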
//!
//! To run the test, add the following lines to the nano33ble boot sequence:
//! ```
//! test::log_test::run(
//!     mux_alarm,
//!     &nrf52840_peripherals.nrf52.nvmc,
//! );
//! ```
//! and use the `P1.08` pin and the `RESET` button to manually erase the log and reboot the
//! nano33ble, respectively.

use capsules_core::virtualizers::virtual_alarm::{MuxAlarm, VirtualMuxAlarm};
use capsules_extra::log;
use core::cell::Cell;
use core::ptr::addr_of_mut;
use kernel::debug;
use kernel::hil::flash;
use kernel::hil::gpio::{self, Interrupt, InterruptEdge};
use kernel::hil::log::{LogRead, LogReadClient, LogWrite, LogWriteClient};
use kernel::hil::time::{Alarm, AlarmClient, ConvertTicks};
use kernel::static_init;
use kernel::storage_volume;
use kernel::utilities::cells::{NumericCellExt, TakeCell};
use kernel::ErrorCode;
use nrf52840::{
    gpio::{GPIOPin, Pin},
    nvmc::{NrfPage, Nvmc},
    rtc::Rtc,
};

// Allocate 16 KiB volume for log storage (the nano33ble page size is 4 KiB).
storage_volume!(TEST_LOG, 16);

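// Sets up the log test: configures the flash controller, builds the log capsule on top of the
// TEST_LOG volume, creates a virtual alarm for pacing operations, registers the test as the
// read/append/alarm client, configures P1.08 as the erase pin, and starts the test.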
pub unsafe fn run(mux_alarm: &'static MuxAlarm<'static, Rtc>, flash_controller: &'static Nvmc) {
    // Set up flash controller.
    flash_controller.configure_writeable();
    flash_controller.configure_eraseable();
    let pagebuffer = static_init!(NrfPage, NrfPage::default());

    // Create actual log storage abstraction on top of flash.
    let log = static_init!(
        Log,
        log::Log::new(&TEST_LOG, flash_controller, pagebuffer, true)
    );
    flash::HasClient::set_client(flash_controller, log);
    kernel::deferred_call::DeferredCallClient::register(log);

    let alarm = static_init!(
        VirtualMuxAlarm<'static, Rtc>,
        VirtualMuxAlarm::new(mux_alarm)
    );
    alarm.setup();

    // Create and run test for log storage.
    let test = static_init!(
        LogTest<VirtualMuxAlarm<'static, Rtc>>,
        LogTest::new(log, &mut *addr_of_mut!(BUFFER), alarm, &TEST_OPS)
    );
    log.set_read_client(test);
    log.set_append_client(test);
    test.alarm.set_alarm_client(test);

    // Configure GPIO pin P1.08 as the log erase pin.
    let log_erase_pin = GPIOPin::new(Pin::P1_08);
    log_erase_pin.enable_interrupts(InterruptEdge::RisingEdge);
    log_erase_pin.set_client(test);

    test.wait();
}

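// Sequence of operations performed by the test. Each operation completes asynchronously; its
// callback (or the alarm, for retries) advances the test to the next operation.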
static TEST_OPS: [TestOp; 24] = [
    // Read back any existing entries.
    TestOp::BadRead,
    TestOp::Read,
    // Write multiple pages, but don't fill log.
    TestOp::BadWrite,
    TestOp::Write,
    TestOp::Read,
    TestOp::BadWrite,
    TestOp::Write,
    TestOp::Read,
    // Seek to beginning and re-verify entire log.
    TestOp::SeekBeginning,
    TestOp::Read,
    // Write multiple pages, over-filling log and overwriting oldest entries.
    TestOp::SeekBeginning,
    TestOp::Write,
    // Read offset should be incremented since it was invalidated by previous write.
    TestOp::BadRead,
    TestOp::Read,
    // Write multiple pages and sync. Read offset should be invalidated due to sync clobbering
    // previous read offset.
    TestOp::Write,
    TestOp::Sync,
    TestOp::Read,
    // Try bad seeks, should fail and not change read entry ID.
    TestOp::Write,
    TestOp::BadSeek(0),
    TestOp::BadSeek(usize::MAX),
    TestOp::Read,
    // Try bad write, nothing should change.
    TestOp::BadWrite,
    TestOp::Read,
    // Sync log before finishing test so that all changes persist for next test iteration.
    TestOp::Sync,
];

// Buffer for reading from and writing to in the log tests.
static mut BUFFER: [u8; 8] = [0; 8];
// Length of buffer to actually use.
const BUFFER_LEN: usize = 8;
// Amount to shift value before adding to magic in order to fit in buffer.
const VALUE_SHIFT: usize = 8 * (8 - BUFFER_LEN);
// Dummy buffer for testing bad writes.
static mut DUMMY_BUFFER: [u8; 4160] = [0; 4160];
// Time to wait in between log operations.
const WAIT_MS: u32 = 10;
// Magic number to write to log storage (+ offset).
const MAGIC: u64 = 0x0102030405060708;
// Number of entries to write per write operation.
const ENTRIES_PER_WRITE: u64 = 120;

// Test's current state.
#[derive(Clone, Copy, PartialEq)]
enum TestState {
    Operate, // Running through test operations.
    Erase,   // Erasing log and restarting test.
    CleanUp, // Cleaning up test after all operations complete.
}

// A single operation within the test.
#[derive(Clone, Copy, PartialEq, Debug)]
enum TestOp {
    Read,
    BadRead,
    Write,
    BadWrite,
    Sync,
    SeekBeginning,
    BadSeek(usize),
}

type Log = log::Log<'static, Nvmc>;

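// Test harness state: the log under test, a scratch buffer shared between reads and writes, an
// alarm for pacing and retrying operations, the operation list and current index, and the expected
// next read/write values (recovered from the log at startup).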
struct LogTest<A: 'static + Alarm<'static>> {
    log: &'static Log,
    buffer: TakeCell<'static, [u8]>,
    alarm: &'static A,
    state: Cell<TestState>,
    ops: &'static [TestOp],
    op_index: Cell<usize>,
    op_start: Cell<bool>,
    read_val: Cell<u64>,
    write_val: Cell<u64>,
}

impl<A: 'static + Alarm<'static>> LogTest<A> {
    fn new(
        log: &'static Log,
        buffer: &'static mut [u8],
        alarm: &'static A,
        ops: &'static [TestOp],
    ) -> LogTest<A> {
        // Recover test state.
        let read_val = entry_id_to_test_value(log.next_read_entry_id());
        let write_val = entry_id_to_test_value(log.log_end());

        debug!(
            "Log recovered (Start & end entry IDs: {:?} & {:?}; read & write values: {} & {})",
            log.next_read_entry_id(),
            log.log_end(),
            read_val,
            write_val
        );

        LogTest {
            log,
            buffer: TakeCell::new(buffer),
            alarm,
            state: Cell::new(TestState::Operate),
            ops,
            op_index: Cell::new(0),
            op_start: Cell::new(true),
            read_val: Cell::new(read_val),
            write_val: Cell::new(write_val),
        }
    }

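    // Dispatch the current test operation, erase the log, or report completion, depending on the
    // test state.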
    fn run(&self) {
        match self.state.get() {
            TestState::Operate => {
                let op_index = self.op_index.get();
                if op_index == self.ops.len() {
                    self.state.set(TestState::CleanUp);
                    let _ = self.log.seek(self.log.log_start());
                    return;
                }

                match self.ops[op_index] {
                    TestOp::Read => self.read(),
                    TestOp::BadRead => self.bad_read(),
                    TestOp::Write => self.write(),
                    TestOp::BadWrite => self.bad_write(),
                    TestOp::Sync => self.sync(),
                    TestOp::SeekBeginning => self.seek_beginning(),
                    TestOp::BadSeek(entry_id) => self.bad_seek(entry_id),
                }
            }
            TestState::Erase => self.erase(),
            TestState::CleanUp => {
                debug!(
                    "Success! Final start & end entry IDs: {:?} & {:?}",
                    self.log.next_read_entry_id(),
                    self.log.log_end()
                );
            }
        }
    }

    fn next_op(&self) {
        self.op_index.increment();
        self.op_start.set(true);
    }

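    // Erase the entire log. If the flash is busy, retry after a short delay.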
    fn erase(&self) {
        match self.log.erase() {
            Ok(()) => (),
            Err(ErrorCode::BUSY) => {
                self.wait();
            }
            _ => panic!("Couldn't erase log!"),
        }
    }

    fn read(&self) {
        // Update read value if clobbered by previous operation.
        if self.op_start.get() {
            let next_read_val = entry_id_to_test_value(self.log.next_read_entry_id());
            if self.read_val.get() < next_read_val {
                debug!(
                    "Increasing read value from {} to {} due to clobbering (read entry ID is {:?})!",
                    self.read_val.get(),
                    next_read_val,
                    self.log.next_read_entry_id()
                );
                self.read_val.set(next_read_val);
            }
        }

        self.buffer.take().map_or_else(
            || panic!("NO BUFFER"),
            move |buffer| {
                // Clear buffer first to make debugging more sane.
                buffer.clone_from_slice(&0u64.to_be_bytes());

                if let Err((error, original_buffer)) = self.log.read(buffer, BUFFER_LEN) {
                    self.buffer.replace(original_buffer);
                    match error {
                        ErrorCode::FAIL => {
                            // No more entries, start writing again.
                            debug!(
                                "READ DONE: READ OFFSET: {:?} / WRITE OFFSET: {:?}",
                                self.log.next_read_entry_id(),
                                self.log.log_end()
                            );
                            self.next_op();
                            self.run();
                        }
                        ErrorCode::BUSY => {
                            debug!("Flash busy, waiting before reattempting read");
                            self.wait();
                        }
                        _ => panic!("READ #{} FAILED: {:?}", self.read_val.get(), error),
                    }
                }
            },
        );
    }

    fn bad_read(&self) {
        // Ensure failure if buffer is smaller than provided max read length.
        self.buffer
            .take()
            .map(
                move |buffer| match self.log.read(buffer, buffer.len() + 1) {
                    Ok(()) => panic!("Read with too-large max read length succeeded unexpectedly!"),
                    Err((error, original_buffer)) => {
                        self.buffer.replace(original_buffer);
                        assert_eq!(error, ErrorCode::INVAL);
                    }
                },
            )
            .unwrap();

        // Ensure failure if buffer is too small to hold entry.
        self.buffer
            .take()
            .map(move |buffer| match self.log.read(buffer, BUFFER_LEN - 1) {
                Ok(()) => panic!("Read with too-small buffer succeeded unexpectedly!"),
                Err((error, original_buffer)) => {
                    self.buffer.replace(original_buffer);
                    if self.read_val.get() == self.write_val.get() {
                        assert_eq!(error, ErrorCode::FAIL);
                    } else {
                        assert_eq!(error, ErrorCode::SIZE);
                    }
                }
            })
            .unwrap();

        self.next_op();
        self.run();
    }

    fn write(&self) {
        self.buffer
            .take()
            .map(move |buffer| {
                buffer.clone_from_slice(
                    &(MAGIC + (self.write_val.get() << VALUE_SHIFT)).to_be_bytes(),
                );
                if let Err((error, original_buffer)) = self.log.append(buffer, BUFFER_LEN) {
                    self.buffer.replace(original_buffer);

                    match error {
                        ErrorCode::BUSY => self.wait(),
                        _ => panic!("WRITE FAILED: {:?}", error),
                    }
                }
            })
            .unwrap();
    }

    fn bad_write(&self) {
        let original_offset = self.log.log_end();

        // Ensure failure if entry length is 0.
        self.buffer
            .take()
            .map(move |buffer| match self.log.append(buffer, 0) {
                Ok(()) => panic!("Appending entry of size 0 succeeded unexpectedly!"),
                Err((error, original_buffer)) => {
                    self.buffer.replace(original_buffer);
                    assert_eq!(error, ErrorCode::INVAL);
                }
            })
            .unwrap();

        // Ensure failure if proposed entry length is greater than buffer length.
        self.buffer
            .take()
            .map(
                move |buffer| match self.log.append(buffer, buffer.len() + 1) {
                    Ok(()) => panic!("Appending with too-small buffer succeeded unexpectedly!"),
                    Err((error, original_buffer)) => {
                        self.buffer.replace(original_buffer);
                        assert_eq!(error, ErrorCode::INVAL);
                    }
                },
            )
            .unwrap();

        // Ensure failure if entry is too large to fit within a single flash page.
        unsafe {
            let dummy_buffer = &mut *addr_of_mut!(DUMMY_BUFFER);
            let len = dummy_buffer.len();
            match self.log.append(dummy_buffer, len) {
                Ok(()) => panic!("Appending a too-large entry succeeded unexpectedly!"),
                Err((error, _original_buffer)) => assert_eq!(error, ErrorCode::SIZE),
            }
        }

        // Make sure that append offset was not changed by failed writes.
        assert_eq!(original_offset, self.log.log_end());
        self.next_op();
        self.run();
    }

    fn sync(&self) {
        match self.log.sync() {
            Ok(()) => (),
            error => panic!("Sync failed: {:?}", error),
        }
    }

    fn seek_beginning(&self) {
        let entry_id = self.log.log_start();
        match self.log.seek(entry_id) {
            Ok(()) => debug!("Seeking to {:?}...", entry_id),
            error => panic!("Seek failed: {:?}", error),
        }
    }

    fn bad_seek(&self, entry_id: usize) {
        // Make sure seek fails with INVAL.
        let original_offset = self.log.next_read_entry_id();
        match self.log.seek(entry_id) {
            Err(ErrorCode::INVAL) => (),
            Ok(()) => panic!(
                "Seek to invalid entry ID {:?} succeeded unexpectedly!",
                entry_id
            ),
            error => panic!(
                "Seek to invalid entry ID {:?} failed with unexpected error {:?}!",
                entry_id, error
            ),
        }

        // Make sure that read offset was not changed by failed seek.
        assert_eq!(original_offset, self.log.next_read_entry_id());
        self.next_op();
        self.run();
    }

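    // Schedule an alarm WAIT_MS in the future; the alarm callback re-enters run() to continue or
    // retry the current operation.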
    fn wait(&self) {
        let delay = self.alarm.ticks_from_ms(WAIT_MS);
        let now = self.alarm.now();
        self.alarm.set_alarm(now, delay);
    }
}

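// Read client callbacks: verify the length and contents of each entry read back from the log, and
// track the expected read value across seeks.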
impl<A: Alarm<'static>> LogReadClient for LogTest<A> {
    fn read_done(&self, buffer: &'static mut [u8], length: usize, error: Result<(), ErrorCode>) {
        match error {
            Ok(()) => {
                // Verify correct number of bytes were read.
                if length != BUFFER_LEN {
                    panic!(
                        "{} bytes read, expected {} on read number {} (offset {:?}). Value read was {:?}",
                        length,
                        BUFFER_LEN,
                        self.read_val.get(),
                        self.log.next_read_entry_id(),
                        &buffer[0..length],
                    );
                }

                // Verify correct value was read.
                let expected = (MAGIC + (self.read_val.get() << VALUE_SHIFT)).to_be_bytes();
                for i in 0..BUFFER_LEN {
                    if buffer[i] != expected[i] {
                        panic!(
                            "Expected {:?}, read {:?} on read number {} (offset {:?})",
                            &expected[0..BUFFER_LEN],
                            &buffer[0..BUFFER_LEN],
                            self.read_val.get(),
                            self.log.next_read_entry_id(),
                        );
                    }
                }

                self.buffer.replace(buffer);
                self.read_val.set(self.read_val.get() + 1);
                self.op_start.set(false);
                self.wait();
            }
            _ => {
                panic!("Read failed unexpectedly!");
            }
        }
    }

    fn seek_done(&self, error: Result<(), ErrorCode>) {
        if error == Ok(()) {
            debug!("Seeked");
            self.read_val
                .set(entry_id_to_test_value(self.log.next_read_entry_id()));
        } else {
            panic!("Seek failed: {:?}", error);
        }

        if self.state.get() == TestState::Operate {
            self.next_op();
        }
        self.run();
    }
}

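// Write client callbacks: verify append results (length and records_lost), and handle sync and
// erase completion.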
impl<A: Alarm<'static>> LogWriteClient for LogTest<A> {
    fn append_done(
        &self,
        buffer: &'static mut [u8],
        length: usize,
        records_lost: bool,
        error: Result<(), ErrorCode>,
    ) {
        self.buffer.replace(buffer);
        self.op_start.set(false);

        match error {
            Ok(()) => {
                if length != BUFFER_LEN {
                    panic!(
                        "Appended {} bytes, expected {} (write #{}, offset {:?})!",
                        length,
                        BUFFER_LEN,
                        self.write_val.get(),
                        self.log.log_end()
                    );
                }
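                // Records should only be reported lost once more entries have been written than
                // fit in the log volume.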
                let expected_records_lost =
                    self.write_val.get() > entry_id_to_test_value(TEST_LOG.len());
                if records_lost && records_lost != expected_records_lost {
                    panic!("Append callback states records_lost = {}, expected {} (write #{}, offset {:?})!",
                           records_lost,
                           expected_records_lost,
                           self.write_val.get(),
                           self.log.log_end()
                    );
                }

                // Stop writing after `ENTRIES_PER_WRITE` entries have been written.
                if (self.write_val.get() + 1) % ENTRIES_PER_WRITE == 0 {
                    debug!(
                        "WRITE DONE: READ OFFSET: {:?} / WRITE OFFSET: {:?}",
                        self.log.next_read_entry_id(),
                        self.log.log_end()
                    );
                    self.next_op();
                }

                self.write_val.set(self.write_val.get() + 1);
            }
            Err(ErrorCode::FAIL) => {
                assert_eq!(length, 0);
                assert!(!records_lost);
                debug!("Append failed due to flash error, retrying...");
            }
            error => panic!("UNEXPECTED APPEND FAILURE: {:?}", error),
        }

        self.wait();
    }

    fn sync_done(&self, error: Result<(), ErrorCode>) {
        if error == Ok(()) {
            debug!(
                "SYNC DONE: READ OFFSET: {:?} / WRITE OFFSET: {:?}",
                self.log.next_read_entry_id(),
                self.log.log_end()
            );
        } else {
            panic!("Sync failed: {:?}", error);
        }

        self.next_op();
        self.run();
    }

    fn erase_done(&self, error: Result<(), ErrorCode>) {
        match error {
            Ok(()) => {
                // Reset test state.
                self.op_index.set(0);
                self.op_start.set(true);
                self.read_val.set(0);
                self.write_val.set(0);

                // Make sure that flash has been erased.
                for i in 0..TEST_LOG.len() {
                    if TEST_LOG[i] != 0xFF {
                        panic!(
                            "Log not properly erased, read {} at byte {}. SUMMARY: {:?}",
                            TEST_LOG[i],
                            i,
                            &TEST_LOG[i..i + 8]
                        );
                    }
                }

                // Make sure that a read on an empty log fails normally.
                self.buffer.take().map(move |buffer| {
                    if let Err((error, original_buffer)) = self.log.read(buffer, BUFFER_LEN) {
                        self.buffer.replace(original_buffer);
                        match error {
                            ErrorCode::FAIL => (),
                            ErrorCode::BUSY => {
                                self.wait();
                            }
                            _ => panic!("Read on empty log did not fail as expected: {:?}", error),
                        }
                    } else {
                        panic!("Read on empty log succeeded! (it shouldn't)");
                    }
                });

                // Move to next operation.
                debug!("Log Storage erased");
                self.state.set(TestState::Operate);
                self.run();
            }
            Err(ErrorCode::BUSY) => {
                // Flash busy, try again.
                self.wait();
            }
            _ => {
                panic!("Erase failed: {:?}", error);
            }
        }
    }
}

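// Alarm expiration: re-run the current test step (used both for pacing operations and for retrying
// when the flash is busy).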
impl<A: Alarm<'static>> AlarmClient for LogTest<A> {
    fn alarm(&self) {
        self.run();
    }
}

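// GPIO interrupt on the P1.08 erase pin: erase the log and restart the test from a clean state.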
impl<A: Alarm<'static>> gpio::Client for LogTest<A> {
    fn fired(&self) {
        // Erase log.
        self.state.set(TestState::Erase);
        self.erase();
    }
}

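// Converts an entry ID (a byte offset within the log volume) into the number of test entries
// preceding it: complete pages times entries per page, plus entries within the final partial page,
// accounting for the page and entry headers added by the log capsule.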
fn entry_id_to_test_value(entry_id: usize) -> u64 {
    // Page and entry header sizes for log storage.
    const PAGE_SIZE: usize = 4096;

    let pages_written = entry_id / PAGE_SIZE;
    let entry_size = log::ENTRY_HEADER_SIZE + BUFFER_LEN;
    let entries_per_page = (PAGE_SIZE - log::PAGE_HEADER_SIZE) / entry_size;
    let entries_last_page = if entry_id % PAGE_SIZE >= log::PAGE_HEADER_SIZE {
        (entry_id % PAGE_SIZE - log::PAGE_HEADER_SIZE) / entry_size
    } else {
        0
    };
    (pages_written * entries_per_page + entries_last_page) as u64
653}