// capsules_extra/log.rs
1// Licensed under the Apache License, Version 2.0 or the MIT License.
2// SPDX-License-Identifier: Apache-2.0 OR MIT
3// Copyright Tock Contributors 2022.
4
5//! Implements a log storage abstraction for storing persistent data in flash.
6//!
7//! Data entries can be appended to the end of a log and read back in-order. Logs may be linear
8//! (denying writes when full) or circular (overwriting the oldest entries with the newest entries
9//! when the underlying flash volume is full). The storage volumes that logs operate upon are
10//! statically allocated at compile time and cannot be dynamically created at runtime.
11//!
12//! Entries can be identified and seeked-to with their unique Entry IDs. Entry IDs maintain the
13//! ordering of the underlying entries, and an entry with a larger entry ID is newer and comes
14//! after an entry with a smaller ID. IDs can also be used to determine the physical position of
15//! entries within the log's underlying storage volume - taking the ID modulo the size of the
16//! underlying storage volume yields the position of the entry's header relative to the start of
17//! the volume. Entries should not be created manually by clients, only retrieved through the
18//! `log_start()`, `log_end()`, and `next_read_entry_id()` functions.
19//!
20//! Entry IDs are not explicitly stored in the log. Instead, each page of the log contains a header
21//! containing the page's offset relative to the start of the log (i.e. if the page size is 512
22//! bytes, then page #0 will have an offset of 0, page #1 an offset of 512, etc.). The offsets
23//! continue to increase even after a circular log wraps around (so if 5 512-byte pages of data are
24//! written to a 4 page log, then page #0 will now have an offset of 2048). Thus, the ID of an
25//! entry can be calculated by taking the offset of the page within the log and adding the offset
26//! of the entry within the page to find the position of the entry within the log (which is the
27//! ID). Entries also have a header of their own, which contains the length of the entry.
28//!
29//! Logs support the following basic operations:
30//! * Read: Read back previously written entries in whole. Entries are read in their
31//! entirety (no partial reads) from oldest to newest.
32//! * Seek: Seek to different entries to begin reading from a different entry (can only
33//! seek to the start of entries).
34//! * Append: Append new data entries onto the end of a log. Can fail if the new entry is too
35//! large to fit within the log.
36//! * Sync: Sync a log to flash to ensure that all changes are persistent.
37//! * Erase: Erase a log in its entirety, clearing the underlying flash volume.
38//! See the documentation for each individual function for more detail on how they operate.
39//!
40//! Note that while logs persist across reboots, they will be erased upon flashing a new kernel.
41//!
42//! Usage
43//! -----
44//!
45//! ```rust,ignore
46//! storage_volume!(VOLUME, 2);
47//! static mut PAGEBUFFER: sam4l::flashcalw::Sam4lPage = sam4l::flashcalw::Sam4lPage::new();
48//!
49//! let log = static_init!(
50//! capsules::log::Log,
51//! capsules::log::Log::new(
52//! &VOLUME,
53//! &mut sam4l::flashcalw::FLASH_CONTROLLER,
54//! &mut PAGEBUFFER,
55//! true
56//! )
57//! );
58//! log.register();
59//! kernel::hil::flash::HasClient::set_client(&sam4l::flashcalw::FLASH_CONTROLLER, log);
60//!
61//! log.set_read_client(log_storage_read_client);
62//! log.set_append_client(log_storage_append_client);
63//! ```
64
65use core::cell::Cell;
66use core::mem::size_of;
67use core::unreachable;
68
69use kernel::deferred_call::{DeferredCall, DeferredCallClient};
70use kernel::hil::flash::{self, Flash};
71use kernel::hil::log::{LogRead, LogReadClient, LogWrite, LogWriteClient};
72use kernel::utilities::cells::{OptionalCell, TakeCell};
73use kernel::ErrorCode;
74
/// Globally declare entry ID type.
///
/// An entry ID is the entry's byte offset from the logical start of the log;
/// taking it modulo the volume length yields the entry's physical position.
type EntryID = usize;

/// Maximum page header size. Each page begins with its `EntryID` offset from
/// the start of the log.
pub const PAGE_HEADER_SIZE: usize = size_of::<EntryID>();
/// Maximum entry header size. Each entry begins with its length in bytes.
pub const ENTRY_HEADER_SIZE: usize = size_of::<usize>();

/// Byte used to pad the end of a page.
const PAD_BYTE: u8 = 0xFF;
85
/// Log state keeps track of any in-progress asynchronous operations.
#[derive(Clone, Copy, PartialEq)]
enum State {
    /// No operation in progress.
    Idle,
    /// Reading an entry into a client buffer.
    Read,
    /// Seeking the read pointer to a new entry ID.
    Seek,
    /// Appending an entry (may involve an asynchronous page flush first).
    Append,
    /// Flushing the pagebuffer to flash.
    Sync,
    /// Erasing the log page-by-page.
    Erase,
}
96
pub struct Log<'a, F: Flash + 'static> {
    /// Underlying storage volume.
    volume: &'static [u8],
    /// Capacity of log in bytes (volume size minus one page header per page).
    capacity: usize,
    /// Flash interface.
    driver: &'a F,
    /// Buffer for a flash page; stages writes until the page is flushed.
    pagebuffer: TakeCell<'static, F::Page>,
    /// Size of a flash page.
    page_size: usize,
    /// Whether or not the log is circular (overwrites oldest entries when full).
    circular: bool,
    /// Read client using Log.
    read_client: OptionalCell<&'a dyn LogReadClient>,
    /// Append client using Log.
    append_client: OptionalCell<&'a dyn LogWriteClient>,

    /// Current operation being executed, if asynchronous.
    state: Cell<State>,
    /// Entry ID of oldest entry remaining in log.
    oldest_entry_id: Cell<EntryID>,
    /// Entry ID of next entry to read.
    read_entry_id: Cell<EntryID>,
    /// Entry ID of next entry to append.
    append_entry_id: Cell<EntryID>,

    /// Deferred call for deferring client callbacks.
    deferred_call: DeferredCall,

    // Note: for saving state across stack ripping.
    /// Client-provided buffer to write from.
    buffer: TakeCell<'static, [u8]>,
    /// Length of data within buffer.
    length: Cell<usize>,
    /// Whether or not records were lost in the previous append.
    records_lost: Cell<bool>,
    /// Error returned by previously executed operation (or Ok(())).
    error: Cell<Result<(), ErrorCode>>,
}
137
138impl<'a, F: Flash + 'static> Log<'a, F> {
    /// Creates a new log over `volume` backed by the flash `driver`, then
    /// immediately reconstructs any pre-existing log state from flash.
    ///
    /// `pagebuffer` is a page-sized staging buffer for writes; `circular`
    /// selects whether the oldest entries are overwritten once the volume is
    /// full.
    pub fn new(
        volume: &'static [u8],
        driver: &'a F,
        pagebuffer: &'static mut F::Page,
        circular: bool,
    ) -> Self {
        let page_size = pagebuffer.as_mut().len();
        // Usable capacity excludes one page header per page of the volume.
        let capacity = volume.len() - PAGE_HEADER_SIZE * (volume.len() / page_size);

        let log: Log<'a, F> = Self {
            volume,
            capacity,
            driver,
            pagebuffer: TakeCell::new(pagebuffer),
            page_size,
            circular,
            read_client: OptionalCell::empty(),
            append_client: OptionalCell::empty(),
            state: Cell::new(State::Idle),
            // All entry IDs start just past the first page's header.
            oldest_entry_id: Cell::new(PAGE_HEADER_SIZE),
            read_entry_id: Cell::new(PAGE_HEADER_SIZE),
            append_entry_id: Cell::new(PAGE_HEADER_SIZE),
            deferred_call: DeferredCall::new(),
            buffer: TakeCell::empty(),
            length: Cell::new(0),
            records_lost: Cell::new(false),
            error: Cell::new(Err(ErrorCode::NODEVICE)),
        };

        // Recover any previously-written log state from flash before use.
        log.reconstruct();
        log
    }
171
172 /// Returns the page number of the page containing the entry with the given ID.
173 fn page_number(&self, entry_id: EntryID) -> usize {
174 (self.volume.as_ptr() as usize + entry_id % self.volume.len()) / self.page_size
175 }
176
    /// Gets the buffer containing the byte at the given position in the log.
    ///
    /// Returns the pagebuffer when `pos` falls within the page currently being
    /// staged, otherwise the whole storage volume. Callers index the returned
    /// slice modulo its length, which maps `pos` into the page (for the
    /// pagebuffer) or into the volume (otherwise).
    fn get_buffer<'b>(&self, pos: usize, pagebuffer: &'b mut F::Page) -> &'b [u8] {
        // Subtract 1 from append entry ID to get position of last bit written. This is needed
        // because the pagebuffer always contains the last written bit, but not necessarily the
        // position represented by the append entry ID (i.e. the pagebuffer isn't flushed yet when
        // `append_entry_id % page_size == 0`).
        if pos / self.page_size == (self.append_entry_id.get() - 1) / self.page_size {
            pagebuffer.as_mut()
        } else {
            self.volume
        }
    }
189
190 /// Gets the byte at the given position in the log.
191 fn get_byte(&self, pos: usize, pagebuffer: &mut F::Page) -> u8 {
192 let buffer = self.get_buffer(pos, pagebuffer);
193 buffer[pos % buffer.len()]
194 }
195
196 /// Gets a `num_bytes` long slice of bytes starting from a position within the log.
197 fn get_bytes<'b>(&self, pos: usize, num_bytes: usize, pagebuffer: &'b mut F::Page) -> &'b [u8] {
198 let buffer = self.get_buffer(pos, pagebuffer);
199 let offset = pos % buffer.len();
200 &buffer[offset..offset + num_bytes]
201 }
202
203 /// Resets a log back to an empty log. Returns whether or not the log was reset successfully.
204 fn reset(&self) -> bool {
205 self.oldest_entry_id.set(PAGE_HEADER_SIZE);
206 self.read_entry_id.set(PAGE_HEADER_SIZE);
207 self.append_entry_id.set(PAGE_HEADER_SIZE);
208 self.pagebuffer.take().is_some_and(move |pagebuffer| {
209 for e in pagebuffer.as_mut().iter_mut() {
210 *e = 0;
211 }
212 self.pagebuffer.replace(pagebuffer);
213 true
214 })
215 }
216
    /// Reconstructs a log from flash.
    ///
    /// Scans every page header in the volume to find the oldest and newest
    /// valid pages, walks the newest page to find the current append position,
    /// and primes the pagebuffer accordingly. If no valid page is found, the
    /// log is reset to an empty state.
    fn reconstruct(&self) {
        // Read page headers, get IDs of oldest and newest pages.
        let mut oldest_page_id: EntryID = usize::MAX;
        let mut newest_page_id: EntryID = 0;
        for header_pos in (0..self.volume.len()).step_by(self.page_size) {
            let page_id = {
                const ID_SIZE: usize = size_of::<EntryID>();
                let id_bytes = &self.volume[header_pos..header_pos + ID_SIZE];
                let id_bytes = <[u8; ID_SIZE]>::try_from(id_bytes).unwrap();
                usize::from_ne_bytes(id_bytes)
            };

            // Validate page ID read from header: a valid page's ID must map
            // (modulo volume size) back onto the position it was read from.
            if page_id % self.volume.len() == header_pos {
                if page_id < oldest_page_id {
                    oldest_page_id = page_id;
                }
                if page_id > newest_page_id {
                    newest_page_id = page_id;
                }
            }
        }

        // Reconstruct log if at least one valid page was found (meaning oldest page ID was set to
        // something not usize::MAX).
        if oldest_page_id != usize::MAX {
            // Walk entries in last (newest) page to calculate last page length.
            let mut last_page_len = PAGE_HEADER_SIZE;
            loop {
                // Check if next byte is start of valid entry. A 0 byte means
                // unwritten space; PAD_BYTE means end-of-page padding.
                let volume_offset = newest_page_id % self.volume.len() + last_page_len;
                if self.volume[volume_offset] == 0 || self.volume[volume_offset] == PAD_BYTE {
                    break;
                }

                // Get next entry length (entry header plus payload).
                let entry_length = {
                    const LENGTH_SIZE: usize = size_of::<usize>();
                    let length_bytes = &self.volume[volume_offset..volume_offset + LENGTH_SIZE];
                    let length_bytes = <[u8; LENGTH_SIZE]>::try_from(length_bytes).unwrap();
                    usize::from_ne_bytes(length_bytes)
                } + ENTRY_HEADER_SIZE;

                // Add to page length if length is valid (fits within remainder of page).
                if last_page_len + entry_length <= self.page_size {
                    last_page_len += entry_length;
                    if last_page_len == self.page_size {
                        break;
                    }
                } else {
                    break;
                }
            }

            // Set tracked entry IDs: reads start at the oldest page's first
            // entry, appends continue after the last entry of the newest page.
            self.oldest_entry_id.set(oldest_page_id + PAGE_HEADER_SIZE);
            self.read_entry_id.set(oldest_page_id + PAGE_HEADER_SIZE);
            self.append_entry_id.set(newest_page_id + last_page_len);

            // Populate page buffer.
            self.pagebuffer
                .take()
                .map(move |pagebuffer| {
                    // Determine if pagebuffer should be reset or copied from flash.
                    let mut copy_pagebuffer = last_page_len % self.page_size != 0;
                    if !copy_pagebuffer {
                        // Last page full, reset pagebuffer for next page.
                        copy_pagebuffer = !self.reset_pagebuffer(pagebuffer);
                    }
                    if copy_pagebuffer {
                        // Copy last page into pagebuffer so appends can resume
                        // mid-page without losing already-written data.
                        for i in 0..self.page_size {
                            pagebuffer.as_mut()[i] =
                                self.volume[newest_page_id % self.volume.len() + i];
                        }
                    }
                    self.pagebuffer.replace(pagebuffer);
                })
                .unwrap();
        } else {
            // No valid pages found, create fresh log.
            self.reset();
        }
    }
302
    /// Returns the ID of the next entry to read or an error if no entry could be retrieved.
    /// Result<(), ErrorCode>s used:
    /// * FAIL: reached end of log, nothing to read.
    /// * RESERVE: internal pagebuffer missing.
    fn get_next_entry(&self) -> Result<EntryID, Result<(), ErrorCode>> {
        self.pagebuffer
            .take()
            .map_or(Err(Err(ErrorCode::RESERVE)), move |pagebuffer| {
                let mut entry_id = self.read_entry_id.get();

                // Skip page header if at start of page or skip padded bytes if at end of page.
                if entry_id % self.page_size == 0 {
                    entry_id += PAGE_HEADER_SIZE;
                } else if self.get_byte(entry_id, pagebuffer) == PAD_BYTE {
                    // Rest of this page is padding: jump to the first entry of
                    // the next page (past that page's header).
                    entry_id += self.page_size - entry_id % self.page_size + PAGE_HEADER_SIZE;
                }

                // Check if end of log was reached and return.
                self.pagebuffer.replace(pagebuffer);
                if entry_id >= self.append_entry_id.get() {
                    Err(Err(ErrorCode::FAIL))
                } else {
                    Ok(entry_id)
                }
            })
    }
329
    /// Reads and returns the contents of an entry header with the given ID. Fails if the header
    /// data is invalid.
    /// Result<(), ErrorCode>s used:
    /// * FAIL: entry header invalid.
    /// * RESERVE: internal pagebuffer missing.
    fn read_entry_header(&self, entry_id: EntryID) -> Result<usize, Result<(), ErrorCode>> {
        self.pagebuffer
            .take()
            .map_or(Err(Err(ErrorCode::RESERVE)), move |pagebuffer| {
                // Get length (the entry header is the payload length in native
                // byte order).
                const LENGTH_SIZE: usize = size_of::<usize>();
                let length_bytes = self.get_bytes(entry_id, LENGTH_SIZE, pagebuffer);
                let length_bytes = <[u8; LENGTH_SIZE]>::try_from(length_bytes).unwrap();
                let length = usize::from_ne_bytes(length_bytes);

                // Return length of next entry. A length of zero or one that
                // could not fit in a page (after headers) is invalid.
                self.pagebuffer.replace(pagebuffer);
                if length == 0 || length > self.page_size - PAGE_HEADER_SIZE - ENTRY_HEADER_SIZE {
                    Err(Err(ErrorCode::FAIL))
                } else {
                    Ok(length)
                }
            })
    }
354
    /// Reads the next entry into a buffer. Returns the number of bytes read on success, or an
    /// error otherwise.
    /// Result<(), ErrorCode>s used:
    /// * FAIL: reached end of log, nothing to read.
    /// * RESERVE: internal pagebuffer missing, log is presumably broken.
    /// * SIZE: buffer not large enough to contain entry being read.
    fn read_entry(&self, buffer: &mut [u8], length: usize) -> Result<usize, Result<(), ErrorCode>> {
        // Get next entry to read. Immediately returns FAIL in event of failure.
        let entry_id = self.get_next_entry()?;
        let entry_length = self.read_entry_header(entry_id)?;

        // Read entry into buffer.
        self.pagebuffer
            .take()
            .map_or(Err(Err(ErrorCode::RESERVE)), move |pagebuffer| {
                // Ensure buffer is large enough to hold log entry.
                if entry_length > length {
                    self.pagebuffer.replace(pagebuffer);
                    return Err(Err(ErrorCode::SIZE));
                }
                // Advance past the entry header to the payload.
                let entry_id = entry_id + ENTRY_HEADER_SIZE;

                // Copy data into client buffer.
                let data = self.get_bytes(entry_id, entry_length, pagebuffer);
                buffer[..entry_length].copy_from_slice(&data[..entry_length]);

                // Update read entry ID and return number of bytes read.
                self.read_entry_id.set(entry_id + entry_length);
                self.pagebuffer.replace(pagebuffer);
                Ok(entry_length)
            })
    }
387
388 /// Writes an entry header at the given position within a page. Must write at most
389 /// ENTRY_HEADER_SIZE bytes.
390 fn write_entry_header(&self, length: usize, pos: usize, pagebuffer: &mut F::Page) {
391 for (offset, byte) in length.to_ne_bytes().iter().enumerate() {
392 pagebuffer.as_mut()[pos + offset] = *byte;
393 }
394 }
395
    /// Appends data from a buffer onto the end of the log. Requires that there is enough space
    /// remaining in the pagebuffer for the entry (including metadata).
    fn append_entry(
        &self,
        buffer: &'static mut [u8],
        length: usize,
        pagebuffer: &'static mut F::Page,
    ) {
        // Offset within page to append to.
        let append_entry_id = self.append_entry_id.get();
        let mut page_offset = append_entry_id % self.page_size;

        // Write entry header to pagebuffer.
        self.write_entry_header(length, page_offset, pagebuffer);
        page_offset += ENTRY_HEADER_SIZE;

        // Copy data to pagebuffer.
        pagebuffer.as_mut()[page_offset..(length + page_offset)].copy_from_slice(&buffer[..length]);

        // Increment append offset by number of bytes appended.
        let append_entry_id = append_entry_id + length + ENTRY_HEADER_SIZE;
        self.append_entry_id.set(append_entry_id);

        // Replace pagebuffer and callback client. Records count as lost once
        // the oldest entry ID has moved past its initial position (i.e. a page
        // has been reclaimed at some point).
        self.pagebuffer.replace(pagebuffer);
        self.buffer.replace(buffer);
        self.records_lost
            .set(self.oldest_entry_id.get() != PAGE_HEADER_SIZE);
        self.error.set(Ok(()));
        self.client_callback();
    }
427
    /// Flushes the pagebuffer to flash. Log state must be non-idle before calling, else data races
    /// may occur due to asynchronous page write.
    /// Result<(), ErrorCode>s used:
    /// * Ok(()): flush started successfully.
    /// * FAIL: flash driver not configured.
    /// * BUSY: flash driver busy.
    fn flush_pagebuffer(&self, pagebuffer: &'static mut F::Page) -> Result<(), ErrorCode> {
        // Pad end of page so the next reconstruct/read can recognize the
        // unused tail of the page.
        let mut pad_ptr = self.append_entry_id.get();
        while pad_ptr % self.page_size != 0 {
            pagebuffer.as_mut()[pad_ptr % self.page_size] = PAD_BYTE;
            pad_ptr += 1;
        }

        // Get flash page to write to and log page being overwritten. Subtract page_size since
        // padding pointer points to start of the page following the one we want to flush after the
        // padding operation.
        // NOTE(review): before the log has wrapped, `pad_ptr` is smaller than
        // `volume.len() + page_size`, so this subtraction underflows; with
        // wrapping arithmetic the resulting index matches no valid page and the
        // guards below are no-ops, but confirm overflow checks are disabled in
        // this build configuration.
        let page_number = self.page_number(pad_ptr - self.page_size);
        let overwritten_page = (pad_ptr - self.volume.len() - self.page_size) / self.page_size;

        // Advance read and oldest entry IDs, if within flash page being overwritten.
        let read_entry_id = self.read_entry_id.get();
        if read_entry_id / self.page_size == overwritten_page {
            // Move read entry ID to start of next page.
            self.read_entry_id.set(
                read_entry_id + self.page_size + PAGE_HEADER_SIZE - read_entry_id % self.page_size,
            );
        }

        let oldest_entry_id = self.oldest_entry_id.get();
        if oldest_entry_id / self.page_size == overwritten_page {
            self.oldest_entry_id.set(oldest_entry_id + self.page_size);
        }

        // Sync page to flash. On failure the pagebuffer is reclaimed so the
        // log remains usable.
        match self.driver.write_page(page_number, pagebuffer) {
            Ok(()) => Ok(()),
            Err((ecode, pagebuffer)) => {
                self.pagebuffer.replace(pagebuffer);
                Err(ecode)
            }
        }
    }
471
    /// Resets the pagebuffer so that new data can be written. Note that this also increments the
    /// append entry ID to point to the start of writable data in this new page. Does not reset
    /// pagebuffer or modify append entry ID if the end of a non-circular log is reached. Returns
    /// whether or not the pagebuffer was reset.
    fn reset_pagebuffer(&self, pagebuffer: &mut F::Page) -> bool {
        // Make sure this is not the last page of a non-circular buffer.
        let mut append_entry_id = self.append_entry_id.get();
        if !self.circular && append_entry_id + self.page_size > self.volume.len() {
            return false;
        }

        // Increment append entry ID to point at start of next page.
        if append_entry_id % self.page_size != 0 {
            append_entry_id += self.page_size - append_entry_id % self.page_size;
        }

        // Write page header to pagebuffer: the header is the page's offset
        // from the start of the log, which is exactly this entry ID.
        let id_bytes = append_entry_id.to_ne_bytes();
        pagebuffer.as_mut()[..id_bytes.len()].copy_from_slice(&id_bytes[..]);

        // Note: this is the only place where the append entry ID can cross page boundaries.
        self.append_entry_id.set(append_entry_id + PAGE_HEADER_SIZE);
        true
    }
496
497 /// Erases a single page from storage.
498 fn erase_page(&self) -> Result<(), ErrorCode> {
499 // Uses oldest entry ID to keep track of which page to erase. Thus, the oldest pages will be
500 // erased first and the log will remain in a valid state even if it fails to be erased
501 // completely.
502 self.driver
503 .erase_page(self.page_number(self.oldest_entry_id.get()))
504 }
505
    /// Defers a client callback until later (delivered via `handle_deferred_call`).
    fn deferred_client_callback(&self) {
        self.deferred_call.set();
    }
510
    /// Resets the log state to idle and makes a client callback. The values returned by via the
    /// callback must be saved within the log's state before making a callback.
    ///
    /// Dispatches on the operation that just completed: read/seek results go to
    /// the read client, append/sync/erase results to the append client. Panics
    /// (via `unwrap`) if the corresponding client or saved buffer is missing,
    /// since that indicates a broken invariant.
    fn client_callback(&self) {
        let state = self.state.get();
        match state {
            State::Read | State::Seek => {
                // Return to idle before calling out, so the client may issue a
                // new operation from within its callback.
                self.state.set(State::Idle);
                self.read_client
                    .map(move |read_client| match state {
                        State::Read => self
                            .buffer
                            .take()
                            .map(move |buffer| {
                                read_client.read_done(buffer, self.length.get(), self.error.get());
                            })
                            .unwrap(),
                        State::Seek => read_client.seek_done(self.error.get()),
                        _ => unreachable!(),
                    })
                    .unwrap();
            }
            State::Append | State::Sync | State::Erase => {
                self.state.set(State::Idle);
                self.append_client
                    .map(move |append_client| match state {
                        State::Append => self
                            .buffer
                            .take()
                            .map(move |buffer| {
                                append_client.append_done(
                                    buffer,
                                    self.length.get(),
                                    self.records_lost.get(),
                                    self.error.get(),
                                );
                            })
                            .unwrap(),
                        State::Sync => append_client.sync_done(self.error.get()),
                        State::Erase => append_client.erase_done(self.error.get()),
                        _ => unreachable!(),
                    })
                    .unwrap();
            }
            State::Idle => (),
        }
    }
557}
558
559impl<'a, F: Flash + 'static> LogRead<'a> for Log<'a, F> {
    // Entry IDs are byte offsets from the logical start of the log.
    type EntryID = EntryID;

    /// Set the client for read operation callbacks.
    fn set_read_client(&self, read_client: &'a dyn LogReadClient) {
        self.read_client.set(read_client);
    }
566
    /// Read an entire log entry into a buffer, if there are any remaining. Updates the read entry
    /// ID to point at the next entry when done.
    /// Returns:
    /// * `Ok(())` on success.
    /// * `Err((ErrorCode, buffer))` on failure, returning the client's buffer.
    /// `ErrorCode`s used:
    /// * `FAIL`: reached end of log, nothing to read.
    /// * `BUSY`: log busy with another operation, try again later.
    /// * `INVAL`: provided client buffer is too small.
    /// * `CANCEL`: invalid internal state, read entry ID was reset to start of log.
    /// * `RESERVE`: client or internal pagebuffer missing.
    /// * `SIZE`: buffer not large enough to contain entry being read.
    /// `Result<(), ErrorCode>`s used in read_done callback:
    /// * `Ok(())`: read succeeded.
    fn read(
        &self,
        buffer: &'static mut [u8],
        length: usize,
    ) -> Result<(), (ErrorCode, &'static mut [u8])> {
        // Check for failure cases.
        if self.state.get() != State::Idle {
            // Log busy, try reading again later.
            return Err((ErrorCode::BUSY, buffer));
        } else if buffer.len() < length {
            // Client buffer too small for provided length.
            return Err((ErrorCode::INVAL, buffer));
        } else if self.read_entry_id.get() > self.append_entry_id.get() {
            // Read entry ID beyond append entry ID, must be invalid.
            self.read_entry_id.set(self.oldest_entry_id.get());
            return Err((ErrorCode::CANCEL, buffer));
        } else if self.read_client.is_none() {
            // No client for callback.
            return Err((ErrorCode::RESERVE, buffer));
        }

        // Try reading next entry.
        match self.read_entry(buffer, length) {
            Ok(bytes_read) => {
                // Read completed synchronously; defer the read_done callback.
                self.state.set(State::Read);
                self.buffer.replace(buffer);
                self.length.set(bytes_read);
                self.error.set(Ok(()));
                self.deferred_client_callback();
                Ok(())
            }
            // read_entry only fails with an Err payload, so unwrap_err cannot panic.
            Err(return_code) => Err((return_code.unwrap_err(), buffer)),
        }
    }
617
    /// Returns the ID of the oldest remaining entry in the log.
    fn log_start(&self) -> Self::EntryID {
        self.oldest_entry_id.get()
    }
622
    /// Returns the ID of the newest entry in the log (i.e. the position where
    /// the next entry will be appended).
    fn log_end(&self) -> Self::EntryID {
        self.append_entry_id.get()
    }
627
    /// Returns the ID of the next entry to be read.
    fn next_read_entry_id(&self) -> Self::EntryID {
        self.read_entry_id.get()
    }
632
    /// Seek to a new read entry ID. It is only legal to seek to entry IDs retrieved through the
    /// `log_start()`, `log_end()`, and `next_read_entry_id()` functions.
    /// Result<(), ErrorCode>s used:
    /// * Ok(()): seek succeeded; seek_done is delivered via a deferred call.
    /// * INVAL: entry ID not a valid seek position within the current log.
    fn seek(&self, entry_id: Self::EntryID) -> Result<(), ErrorCode> {
        // Valid positions lie between the oldest entry and the append point,
        // inclusive.
        if entry_id <= self.append_entry_id.get() && entry_id >= self.oldest_entry_id.get() {
            self.read_entry_id.set(entry_id);

            self.state.set(State::Seek);
            self.error.set(Ok(()));
            self.deferred_client_callback();
            Ok(())
        } else {
            Err(ErrorCode::INVAL)
        }
    }
651
    /// Get approximate log capacity in bytes (volume size minus page header
    /// overhead; does not account for per-entry headers or padding).
    fn get_size(&self) -> usize {
        self.capacity
    }
656}
657
658impl<'a, F: Flash + 'static> LogWrite<'a> for Log<'a, F> {
    /// Set the client for append operation callbacks.
    fn set_append_client(&self, append_client: &'a dyn LogWriteClient) {
        self.append_client.set(append_client);
    }
663
    /// Appends an entry onto the end of the log. Entry must fit within a page (including log
    /// metadata).
    /// Returns:
    /// * `Ok(())` on success.
    /// * `Err((ErrorCode, buffer))` on failure, returning the client's buffer.
    /// `ErrorCode`s used:
    /// * `FAIL`: end of non-circular log reached, cannot append any more entries.
    /// * `BUSY`: log busy with another operation, try again later.
    /// * `INVAL`: provided client buffer is too small.
    /// * `RESERVE`: client or internal pagebuffer missing.
    /// * `SIZE`: entry too large to append to log.
    /// `Result<(), ErrorCode>`s used in append_done callback:
    /// * `Ok(())`: append succeeded.
    /// * `FAIL`: write failed due to flash error.
    /// * `CANCEL`: write failed due to reaching the end of a non-circular log.
    fn append(
        &self,
        buffer: &'static mut [u8],
        length: usize,
    ) -> Result<(), (ErrorCode, &'static mut [u8])> {
        // Total bytes the entry occupies, including its header.
        let entry_size = length + ENTRY_HEADER_SIZE;

        // Check for failure cases.
        if self.state.get() != State::Idle {
            // Log busy, try appending again later.
            return Err((ErrorCode::BUSY, buffer));
        } else if length == 0 || buffer.len() < length {
            // Invalid length provided.
            return Err((ErrorCode::INVAL, buffer));
        } else if entry_size + PAGE_HEADER_SIZE > self.page_size {
            // Entry too big, won't fit within a single page.
            return Err((ErrorCode::SIZE, buffer));
        } else if !self.circular && self.append_entry_id.get() + entry_size > self.volume.len() {
            // End of non-circular log has been reached.
            return Err((ErrorCode::FAIL, buffer));
        }

        // Perform append.
        match self.pagebuffer.take() {
            Some(pagebuffer) => {
                self.state.set(State::Append);
                self.length.set(length);

                // Check if previous page needs to be flushed and new entry will fit within space
                // remaining in current page.
                let append_entry_id = self.append_entry_id.get();
                let flush_prev_page = append_entry_id % self.page_size == 0;
                let space_remaining = self.page_size - append_entry_id % self.page_size;
                if !flush_prev_page && entry_size <= space_remaining {
                    // Entry fits, append it.
                    self.append_entry(buffer, length, pagebuffer);
                    Ok(())
                } else {
                    // Need to sync pagebuffer first, then append to new page.
                    // The append itself continues in `write_complete`.
                    self.buffer.replace(buffer);
                    let return_code = self.flush_pagebuffer(pagebuffer);
                    if return_code == Ok(()) {
                        Ok(())
                    } else {
                        // Flush failed to start: undo state and return the
                        // client's buffer with the error.
                        self.state.set(State::Idle);
                        self.buffer.take().map_or_else(
                            || panic!("No buffer to return"),
                            move |buffer| Err((return_code.unwrap_err(), buffer)),
                        )
                    }
                }
            }
            None => Err((ErrorCode::RESERVE, buffer)),
        }
    }
735
    /// Sync log to storage.
    /// Result<(), ErrorCode>s used:
    /// * Ok(()): flush started successfully, or the pagebuffer was already
    ///   empty. Note: in the already-empty case no sync_done callback follows.
    /// * FAIL: flash driver not configured.
    /// * BUSY: log or flash driver busy, try again later.
    /// * RESERVE: internal pagebuffer missing.
    /// Result<(), ErrorCode>s used in sync_done callback:
    /// * Ok(()): sync succeeded.
    /// * FAIL: write failed due to flash error.
    fn sync(&self) -> Result<(), ErrorCode> {
        if self.append_entry_id.get() % self.page_size == PAGE_HEADER_SIZE {
            // Pagebuffer empty, don't need to flush.
            return Ok(());
        } else if self.state.get() != State::Idle {
            // Log busy, try again later.
            return Err(ErrorCode::BUSY);
        }

        self.pagebuffer
            .take()
            .map_or(Err(ErrorCode::RESERVE), move |pagebuffer| {
                self.state.set(State::Sync);
                let return_code = self.flush_pagebuffer(pagebuffer);
                if return_code != Ok(()) {
                    // Flush failed to start; return to idle so a retry is possible.
                    self.state.set(State::Idle);
                }
                return_code
            })
    }
765
    /// Erase the entire log.
    /// Result<(), ErrorCode>s used:
    /// * Ok(()): erase started successfully.
    /// * BUSY: log busy, try again later.
    /// Result<(), ErrorCode>s used in erase_done callback:
    /// * Ok(()): erase succeeded.
    /// * BUSY: erase interrupted by busy flash driver. Call erase again to resume.
    fn erase(&self) -> Result<(), ErrorCode> {
        if self.state.get() != State::Idle {
            // Log busy, try again later.
            return Err(ErrorCode::BUSY);
        }

        // Erase proceeds one page at a time; subsequent pages are erased from
        // `erase_complete`.
        self.state.set(State::Erase);
        self.erase_page()
    }
782}
783
784impl<F: Flash + 'static> flash::Client<F> for Log<'_, F> {
    /// Flash read callback; never expected.
    fn read_complete(&self, _read_buffer: &'static mut F::Page, _result: Result<(), flash::Error>) {
        // Reads are made directly from the storage volume, not through the flash interface.
        unreachable!();
    }
789
790 /// If in the middle of a write operation, reset pagebuffer and finish write. If syncing, make
791 /// successful client callback.
792 fn write_complete(&self, pagebuffer: &'static mut F::Page, result: Result<(), flash::Error>) {
793 match result.is_ok() {
794 true => {
795 match self.state.get() {
796 State::Append => {
797 // Reset pagebuffer and finish writing on the new page.
798 if self.reset_pagebuffer(pagebuffer) {
799 self.buffer
800 .take()
801 .map(move |buffer| {
802 self.append_entry(buffer, self.length.get(), pagebuffer);
803 })
804 .unwrap();
805 } else {
806 self.pagebuffer.replace(pagebuffer);
807 self.length.set(0);
808 self.records_lost.set(false);
809 self.error.set(Err(ErrorCode::CANCEL));
810 self.client_callback();
811 }
812 }
813 State::Sync => {
814 // Reset pagebuffer if synced page was full.
815 if self.append_entry_id.get() % self.page_size == 0 {
816 self.reset_pagebuffer(pagebuffer);
817 }
818
819 self.pagebuffer.replace(pagebuffer);
820 self.error.set(Ok(()));
821 self.client_callback();
822 }
823 _ => unreachable!(),
824 }
825 }
826 false => {
827 match result.unwrap_err() {
828 flash::Error::FlashError | flash::Error::FlashMemoryProtectionError => {
829 // Make client callback with FAIL return code.
830 self.pagebuffer.replace(pagebuffer);
831 match self.state.get() {
832 State::Append => {
833 self.length.set(0);
834 self.records_lost.set(false);
835 self.error.set(Err(ErrorCode::FAIL));
836 self.client_callback();
837 }
838 State::Sync => {
839 self.error.set(Err(ErrorCode::FAIL));
840 self.client_callback();
841 }
842 _ => unreachable!(),
843 }
844 }
845 }
846 }
847 }
848 }
849
850 /// Erase next page if log erase complete, else make client callback. Fails with BUSY if flash
851 /// is busy and erase cannot be completed.
852 fn erase_complete(&self, result: Result<(), flash::Error>) {
853 match result.is_ok() {
854 true => {
855 let oldest_entry_id = self.oldest_entry_id.get();
856 if oldest_entry_id >= self.append_entry_id.get() - self.page_size {
857 // Erased all pages. Reset state and callback client.
858 if self.reset() {
859 self.error.set(Ok(()));
860 } else {
861 self.error.set(Err(ErrorCode::RESERVE));
862 }
863 self.client_callback();
864 } else {
865 // Not done, erase next page.
866 self.oldest_entry_id.set(oldest_entry_id + self.page_size);
867 let status = self.erase_page();
868
869 // Abort and alert client if flash driver is busy.
870 if status == Err(ErrorCode::BUSY) {
871 self.read_entry_id
872 .set(core::cmp::max(self.read_entry_id.get(), oldest_entry_id));
873 self.error.set(Err(ErrorCode::BUSY));
874 self.client_callback();
875 }
876 }
877 }
878 false => match result.unwrap_err() {
879 flash::Error::FlashError | flash::Error::FlashMemoryProtectionError => {
880 self.error.set(Err(ErrorCode::FAIL));
881 self.client_callback();
882 }
883 },
884 }
885 }
886}
887
impl<F: Flash + 'static> DeferredCallClient for Log<'_, F> {
    /// Deferred call fired: deliver the pending client callback.
    fn handle_deferred_call(&self) {
        self.client_callback();
    }

    /// Register this log with the kernel's deferred call machinery.
    fn register(&'static self) {
        self.deferred_call.register(self);
    }
}