use capsules_core::virtualizers::virtual_alarm::{MuxAlarm, VirtualMuxAlarm};
use capsules_extra::log;
use core::cell::Cell;
use core::ptr::addr_of_mut;
use kernel::debug;
use kernel::hil::flash;
use kernel::hil::gpio::{self, Interrupt, InterruptEdge};
use kernel::hil::log::{LogRead, LogReadClient, LogWrite, LogWriteClient};
use kernel::hil::time::{Alarm, AlarmClient, ConvertTicks};
use kernel::static_init;
use kernel::storage_volume;
use kernel::utilities::cells::{NumericCellExt, TakeCell};
use kernel::ErrorCode;
use nrf52840::{
    gpio::{GPIOPin, Pin},
    nvmc::{NrfPage, Nvmc},
    rtc::Rtc,
};

// Allocate a dedicated flash region for the test log.
storage_volume!(TEST_LOG, 16);

pub unsafe fn run(mux_alarm: &'static MuxAlarm<'static, Rtc>, flash_controller: &'static Nvmc) {
    // Set up the flash controller for writing and erasing.
    flash_controller.configure_writeable();
    flash_controller.configure_eraseable();
    let pagebuffer = static_init!(NrfPage, NrfPage::default());

    // Create the log storage abstraction on top of the flash controller.
    let log = static_init!(
        Log,
        log::Log::new(&TEST_LOG, flash_controller, pagebuffer, true)
    );
    flash::HasClient::set_client(flash_controller, log);
    kernel::deferred_call::DeferredCallClient::register(log);

    let alarm = static_init!(
        VirtualMuxAlarm<'static, Rtc>,
        VirtualMuxAlarm::new(mux_alarm)
    );
    alarm.setup();

    // Create the test and register it as the client for all callbacks.
    let test = static_init!(
        LogTest<VirtualMuxAlarm<'static, Rtc>>,
        LogTest::new(log, &mut *addr_of_mut!(BUFFER), alarm, &TEST_OPS)
    );
    log.set_read_client(test);
    log.set_append_client(test);
    test.alarm.set_alarm_client(test);

    // A rising edge on P1_08 triggers a log erase.
    let log_erase_pin = GPIOPin::new(Pin::P1_08);
    log_erase_pin.enable_interrupts(InterruptEdge::RisingEdge);
    log_erase_pin.set_client(test);

    test.wait();
}
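
// Example of kicking off this test from a board's main.rs. This is an
// illustrative sketch only: `log_test`, `mux_alarm`, and `base_peripherals`
// are assumed names that depend on how the board wires up its modules and
// peripherals.
//
//     log_test::run(mux_alarm, &base_peripherals.nvmc);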

// Log test operations, executed in order.
static TEST_OPS: [TestOp; 24] = [
    // Read back anything already in the log.
    TestOp::BadRead,
    TestOp::Read,
    // Write batches of entries and read them back.
    TestOp::BadWrite,
    TestOp::Write,
    TestOp::Read,
    TestOp::BadWrite,
    TestOp::Write,
    TestOp::Read,
    // Seek back to the start of the log and read again.
    TestOp::SeekBeginning,
    TestOp::Read,
    // Write more entries after seeking to the beginning.
    TestOp::SeekBeginning,
    TestOp::Write,
    TestOp::BadRead,
    TestOp::Read,
    TestOp::Write,
    // Sync the log, then keep reading and writing.
    TestOp::Sync,
    TestOp::Read,
    TestOp::Write,
    // Seek to invalid entry IDs, then finish with more reads and a final sync.
    TestOp::BadSeek(0),
    TestOp::BadSeek(usize::MAX),
    TestOp::Read,
    TestOp::BadWrite,
    TestOp::Read,
    TestOp::Sync,
];
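
// TestState::Erase never appears in this sequence: erasing is triggered
// asynchronously by the GPIO interrupt configured in run(), and erase_done()
// restarts the sequence from the first operation.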

// Buffer used for reading from and writing to the log.
static mut BUFFER: [u8; 8] = [0; 8];
// Length of the buffer actually used for each entry.
const BUFFER_LEN: usize = 8;
// Shift applied to the counter value before adding it to MAGIC.
const VALUE_SHIFT: usize = 8 * (8 - BUFFER_LEN);
// Oversized buffer used to exercise appends that cannot fit in the log.
static mut DUMMY_BUFFER: [u8; 4160] = [0; 4160];
// Time to wait, in milliseconds, between log operations (and before retries).
const WAIT_MS: u32 = 10;
// Base value written to the log; the per-entry counter is added on top of it.
const MAGIC: u64 = 0x0102030405060708;
// Number of entries appended by a single write operation.
const ENTRIES_PER_WRITE: u64 = 120;

// Current state of the test.
#[derive(Clone, Copy, PartialEq)]
enum TestState {
    Operate, // Executing the operations in TEST_OPS.
    Erase,   // Erasing the log (triggered by the log-erase pin).
    CleanUp, // All operations complete; seek back to the start and report success.
}

// A single operation for the log test to perform.
#[derive(Clone, Copy, PartialEq, Debug)]
enum TestOp {
    Read,
    BadRead,
    Write,
    BadWrite,
    Sync,
    SeekBeginning,
    BadSeek(usize),
}

type Log = log::Log<'static, Nvmc>;

struct LogTest<A: 'static + Alarm<'static>> {
    log: &'static Log,
    buffer: TakeCell<'static, [u8]>,
    alarm: &'static A,
    state: Cell<TestState>,
    ops: &'static [TestOp],
    op_index: Cell<usize>,
    op_start: Cell<bool>,
    read_val: Cell<u64>,
    write_val: Cell<u64>,
}
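
// Note on the fields above: op_start is true until the first read or append of
// the current operation completes; read() uses it to resynchronize read_val
// with the log's read entry ID exactly once per Read operation.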

impl<A: 'static + Alarm<'static>> LogTest<A> {
    fn new(
        log: &'static Log,
        buffer: &'static mut [u8],
        alarm: &'static A,
        ops: &'static [TestOp],
    ) -> LogTest<A> {
        // Recover the counter values implied by whatever is already in the log.
        let read_val = entry_id_to_test_value(log.next_read_entry_id());
        let write_val = entry_id_to_test_value(log.log_end());

        debug!(
            "Log recovered (Start & end entry IDs: {:?} & {:?}; read & write values: {} & {})",
            log.next_read_entry_id(),
            log.log_end(),
            read_val,
            write_val
        );

        LogTest {
            log,
            buffer: TakeCell::new(buffer),
            alarm,
            state: Cell::new(TestState::Operate),
            ops,
            op_index: Cell::new(0),
            op_start: Cell::new(true),
            read_val: Cell::new(read_val),
            write_val: Cell::new(write_val),
        }
    }
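
    // Because read_val and write_val are derived from the recovered entry IDs,
    // the test's expectations stay consistent with entries already persisted in
    // flash, so the test can resume correctly after a reboot.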

    fn run(&self) {
        match self.state.get() {
            TestState::Operate => {
                let op_index = self.op_index.get();
                if op_index == self.ops.len() {
                    self.state.set(TestState::CleanUp);
                    let _ = self.log.seek(self.log.log_start());
                    return;
                }

                match self.ops[op_index] {
                    TestOp::Read => self.read(),
                    TestOp::BadRead => self.bad_read(),
                    TestOp::Write => self.write(),
                    TestOp::BadWrite => self.bad_write(),
                    TestOp::Sync => self.sync(),
                    TestOp::SeekBeginning => self.seek_beginning(),
                    TestOp::BadSeek(entry_id) => self.bad_seek(entry_id),
                }
            }
            TestState::Erase => self.erase(),
            TestState::CleanUp => {
                debug!(
                    "Success! Final start & end entry IDs: {:?} & {:?}",
                    self.log.next_read_entry_id(),
                    self.log.log_end()
                );
            }
        }
    }

    fn next_op(&self) {
        self.op_index.increment();
        self.op_start.set(true);
    }

    fn erase(&self) {
        match self.log.erase() {
            Ok(()) => (),
            Err(ErrorCode::BUSY) => {
                self.wait();
            }
            _ => panic!("Couldn't erase log!"),
        }
    }

    fn read(&self) {
        // The read value may need to catch up after old entries are overwritten
        // by newer ones wrapping around the circular log.
        if self.op_start.get() {
            let next_read_val = entry_id_to_test_value(self.log.next_read_entry_id());
            if self.read_val.get() < next_read_val {
                debug!(
                    "Increasing read value from {} to {} due to clobbering (read entry ID is {:?})!",
                    self.read_val.get(),
                    next_read_val,
                    self.log.next_read_entry_id()
                );
                self.read_val.set(next_read_val);
            }
        }

        self.buffer.take().map_or_else(
            || panic!("NO BUFFER"),
            move |buffer| {
                // Clear the buffer before reading into it.
                buffer.clone_from_slice(&0u64.to_be_bytes());

                if let Err((error, original_buffer)) = self.log.read(buffer, BUFFER_LEN) {
                    self.buffer.replace(original_buffer);
                    match error {
                        ErrorCode::FAIL => {
                            // No more entries to read; move on to the next operation.
                            debug!(
                                "READ DONE: READ OFFSET: {:?} / WRITE OFFSET: {:?}",
                                self.log.next_read_entry_id(),
                                self.log.log_end()
                            );
                            self.next_op();
                            self.run();
                        }
                        ErrorCode::BUSY => {
                            debug!("Flash busy, waiting before reattempting read");
                            self.wait();
                        }
                        _ => panic!("READ #{} FAILED: {:?}", self.read_val.get(), error),
                    }
                }
            },
        );
    }

    fn bad_read(&self) {
        // A read must fail if the requested max read length exceeds the buffer size.
        self.buffer
            .take()
            .map(
                move |buffer| match self.log.read(buffer, buffer.len() + 1) {
                    Ok(()) => panic!("Read with too-large max read length succeeded unexpectedly!"),
                    Err((error, original_buffer)) => {
                        self.buffer.replace(original_buffer);
                        assert_eq!(error, ErrorCode::INVAL);
                    }
                },
            )
            .unwrap();

        // A read must also fail if the max read length is smaller than an entry.
        self.buffer
            .take()
            .map(move |buffer| match self.log.read(buffer, BUFFER_LEN - 1) {
                Ok(()) => panic!("Read with too-small max read length succeeded unexpectedly!"),
                Err((error, original_buffer)) => {
                    self.buffer.replace(original_buffer);
                    // With no entries left to read, the log reports FAIL rather than SIZE.
                    if self.read_val.get() == self.write_val.get() {
                        assert_eq!(error, ErrorCode::FAIL);
                    } else {
                        assert_eq!(error, ErrorCode::SIZE);
                    }
                }
            })
            .unwrap();

        self.next_op();
        self.run();
    }

    fn write(&self) {
        self.buffer
            .take()
            .map(move |buffer| {
                buffer.clone_from_slice(
                    &(MAGIC + (self.write_val.get() << VALUE_SHIFT)).to_be_bytes(),
                );
                if let Err((error, original_buffer)) = self.log.append(buffer, BUFFER_LEN) {
                    self.buffer.replace(original_buffer);

                    match error {
                        ErrorCode::BUSY => self.wait(),
                        _ => panic!("WRITE FAILED: {:?}", error),
                    }
                }
            })
            .unwrap();
    }
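
    // A single TestOp::Write drives many appends: append_done() only advances to
    // the next operation once (write_val + 1) % ENTRIES_PER_WRITE == 0, i.e.
    // after ENTRIES_PER_WRITE consecutive entries have been written.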

    fn bad_write(&self) {
        let original_offset = self.log.log_end();

        // An append of length 0 must fail.
        self.buffer
            .take()
            .map(move |buffer| match self.log.append(buffer, 0) {
                Ok(()) => panic!("Appending entry of size 0 succeeded unexpectedly!"),
                Err((error, original_buffer)) => {
                    self.buffer.replace(original_buffer);
                    assert_eq!(error, ErrorCode::INVAL);
                }
            })
            .unwrap();

        // An append longer than the provided buffer must fail.
        self.buffer
            .take()
            .map(
                move |buffer| match self.log.append(buffer, buffer.len() + 1) {
                    Ok(()) => panic!("Appending with too-small buffer succeeded unexpectedly!"),
                    Err((error, original_buffer)) => {
                        self.buffer.replace(original_buffer);
                        assert_eq!(error, ErrorCode::INVAL);
                    }
                },
            )
            .unwrap();

        // An entry too large for the log must be rejected with SIZE.
        unsafe {
            let dummy_buffer = &mut *addr_of_mut!(DUMMY_BUFFER);
            let len = dummy_buffer.len();
            match self.log.append(dummy_buffer, len) {
                Ok(()) => panic!("Appending oversized entry succeeded unexpectedly!"),
                Err((error, _original_buffer)) => assert_eq!(error, ErrorCode::SIZE),
            }
        }

        // None of the failed appends should have moved the append offset.
        assert_eq!(original_offset, self.log.log_end());
        self.next_op();
        self.run();
    }

    fn sync(&self) {
        match self.log.sync() {
            Ok(()) => (),
            error => panic!("Sync failed: {:?}", error),
        }
    }

    fn seek_beginning(&self) {
        let entry_id = self.log.log_start();
        match self.log.seek(entry_id) {
            Ok(()) => debug!("Seeking to {:?}...", entry_id),
            error => panic!("Seek failed: {:?}", error),
        }
    }

    fn bad_seek(&self, entry_id: usize) {
        // Seeking to an invalid entry ID must fail with INVAL...
        let original_offset = self.log.next_read_entry_id();
        match self.log.seek(entry_id) {
            Err(ErrorCode::INVAL) => (),
            Ok(()) => panic!(
                "Seek to invalid entry ID {:?} succeeded unexpectedly!",
                entry_id
            ),
            error => panic!(
                "Seek to invalid entry ID {:?} failed with unexpected error {:?}!",
                entry_id, error
            ),
        }

        // ...and must leave the read offset untouched.
        assert_eq!(original_offset, self.log.next_read_entry_id());
        self.next_op();
        self.run();
    }

    fn wait(&self) {
        let delay = self.alarm.ticks_from_ms(WAIT_MS);
        let now = self.alarm.now();
        self.alarm.set_alarm(now, delay);
    }
}
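
// wait() arms the virtual alarm for WAIT_MS; when it fires, the AlarmClient
// implementation below simply calls run() again. Because op_index has not
// advanced, this is how BUSY (and transient flash FAIL) results are retried.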

impl<A: Alarm<'static>> LogReadClient for LogTest<A> {
    fn read_done(&self, buffer: &'static mut [u8], length: usize, error: Result<(), ErrorCode>) {
        match error {
            Ok(()) => {
                // Verify that the expected number of bytes was read.
                if length != BUFFER_LEN {
                    panic!(
                        "{} bytes read, expected {} on read number {} (offset {:?}). Value read was {:?}",
                        length,
                        BUFFER_LEN,
                        self.read_val.get(),
                        self.log.next_read_entry_id(),
                        &buffer[0..length],
                    );
                }

                // Verify the entry contents: MAGIC plus the expected counter value.
                let expected = (MAGIC + (self.read_val.get() << VALUE_SHIFT)).to_be_bytes();
                for i in 0..BUFFER_LEN {
                    if buffer[i] != expected[i] {
                        panic!(
                            "Expected {:?}, read {:?} on read number {} (offset {:?})",
                            &expected[0..BUFFER_LEN],
                            &buffer[0..BUFFER_LEN],
                            self.read_val.get(),
                            self.log.next_read_entry_id(),
                        );
                    }
                }

                self.buffer.replace(buffer);
                self.read_val.set(self.read_val.get() + 1);
                self.op_start.set(false);
                self.wait();
            }
            _ => {
                panic!("Read failed unexpectedly!");
            }
        }
    }

    fn seek_done(&self, error: Result<(), ErrorCode>) {
        if error == Ok(()) {
            debug!("Seeked");
            self.read_val
                .set(entry_id_to_test_value(self.log.next_read_entry_id()));
        } else {
            panic!("Seek failed: {:?}", error);
        }

        if self.state.get() == TestState::Operate {
            self.next_op();
        }
        self.run();
    }
}

impl<A: Alarm<'static>> LogWriteClient for LogTest<A> {
    fn append_done(
        &self,
        buffer: &'static mut [u8],
        length: usize,
        records_lost: bool,
        error: Result<(), ErrorCode>,
    ) {
        self.buffer.replace(buffer);
        self.op_start.set(false);

        match error {
            Ok(()) => {
                if length != BUFFER_LEN {
                    panic!(
                        "Appended {} bytes, expected {} (write #{}, offset {:?})!",
                        length,
                        BUFFER_LEN,
                        self.write_val.get(),
                        self.log.log_end()
                    );
                }
                // Records should only be lost once enough entries have been
                // written to wrap around the log volume.
                let expected_records_lost =
                    self.write_val.get() > entry_id_to_test_value(TEST_LOG.len());
                if records_lost && records_lost != expected_records_lost {
                    panic!("Append callback states records_lost = {}, expected {} (write #{}, offset {:?})!",
                        records_lost,
                        expected_records_lost,
                        self.write_val.get(),
                        self.log.log_end()
                    );
                }

                // Move on to the next operation only after ENTRIES_PER_WRITE
                // consecutive appends; otherwise keep writing.
                if (self.write_val.get() + 1) % ENTRIES_PER_WRITE == 0 {
                    debug!(
                        "WRITE DONE: READ OFFSET: {:?} / WRITE OFFSET: {:?}",
                        self.log.next_read_entry_id(),
                        self.log.log_end()
                    );
                    self.next_op();
                }

                self.write_val.set(self.write_val.get() + 1);
            }
            Err(ErrorCode::FAIL) => {
                // A flash error is not fatal for the test; retry the append.
                assert_eq!(length, 0);
                assert!(!records_lost);
                debug!("Append failed due to flash error, retrying...");
            }
            error => panic!("UNEXPECTED APPEND FAILURE: {:?}", error),
        }

        self.wait();
    }

    fn sync_done(&self, error: Result<(), ErrorCode>) {
        if error == Ok(()) {
            debug!(
                "SYNC DONE: READ OFFSET: {:?} / WRITE OFFSET: {:?}",
                self.log.next_read_entry_id(),
                self.log.log_end()
            );
        } else {
            panic!("Sync failed: {:?}", error);
        }

        self.next_op();
        self.run();
    }

    fn erase_done(&self, error: Result<(), ErrorCode>) {
        match error {
            Ok(()) => {
                // Reset the test state so the operation sequence restarts from scratch.
                self.op_index.set(0);
                self.op_start.set(true);
                self.read_val.set(0);
                self.write_val.set(0);

                // Verify that the storage volume was actually erased.
                for i in 0..TEST_LOG.len() {
                    if TEST_LOG[i] != 0xFF {
                        panic!(
                            "Log not properly erased, read {} at byte {}. SUMMARY: {:?}",
                            TEST_LOG[i],
                            i,
                            &TEST_LOG[i..i + 8]
                        );
                    }
                }

                // A read on the freshly erased (empty) log must fail.
                self.buffer.take().map(move |buffer| {
                    if let Err((error, original_buffer)) = self.log.read(buffer, BUFFER_LEN) {
                        self.buffer.replace(original_buffer);
                        match error {
                            ErrorCode::FAIL => (),
                            ErrorCode::BUSY => {
                                self.wait();
                            }
                            _ => panic!("Read on empty log did not fail as expected: {:?}", error),
                        }
                    } else {
                        panic!("Read on empty log succeeded unexpectedly!");
                    }
                });

                debug!("Log Storage erased");
                self.state.set(TestState::Operate);
                self.run();
            }
            Err(ErrorCode::BUSY) => {
                // Flash is still busy; retry the erase after a delay.
                self.wait();
            }
            _ => {
                panic!("Erase failed: {:?}", error);
            }
        }
    }
}

impl<A: Alarm<'static>> AlarmClient for LogTest<A> {
    fn alarm(&self) {
        self.run();
    }
}

impl<A: Alarm<'static>> gpio::Client for LogTest<A> {
    fn fired(&self) {
        self.state.set(TestState::Erase);
        self.erase();
    }
}
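
// For illustration of entry_id_to_test_value() below (the header sizes here are
// hypothetical; the real values come from the log capsule): if PAGE_HEADER_SIZE
// were 4 and ENTRY_HEADER_SIZE were 4, each 8-byte entry would occupy 12 bytes,
// a 4096-byte page would hold (4096 - 4) / 12 = 341 entries, and entry ID 8192
// (the start of the third page) would map to test value 2 * 341 = 682.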

fn entry_id_to_test_value(entry_id: usize) -> u64 {
    // Page size of the nRF52840's internal flash.
    const PAGE_SIZE: usize = 4096;

    let pages_written = entry_id / PAGE_SIZE;
    let entry_size = log::ENTRY_HEADER_SIZE + BUFFER_LEN;
    let entries_per_page = (PAGE_SIZE - log::PAGE_HEADER_SIZE) / entry_size;
    let entries_last_page = if entry_id % PAGE_SIZE >= log::PAGE_HEADER_SIZE {
        (entry_id % PAGE_SIZE - log::PAGE_HEADER_SIZE) / entry_size
    } else {
        0
    };
    (pages_written * entries_per_page + entries_last_page) as u64
}