lowrisc/
spi_host.rs

1// Licensed under the Apache License, Version 2.0 or the MIT License.
2// SPDX-License-Identifier: Apache-2.0 OR MIT
3// Copyright Tock Contributors 2022.
4
5//! Serial Peripheral Interface (SPI) Host Driver
6use core::cell::Cell;
7use core::cmp;
8use kernel::hil;
9use kernel::hil::spi::SpiMaster;
10use kernel::hil::spi::{ClockPhase, ClockPolarity};
11use kernel::utilities::cells::{MapCell, OptionalCell};
12use kernel::utilities::leasable_buffer::SubSliceMut;
13use kernel::utilities::registers::interfaces::{ReadWriteable, Readable, Writeable};
14use kernel::utilities::registers::{
15    register_bitfields, register_structs, ReadOnly, ReadWrite, WriteOnly,
16};
17use kernel::utilities::StaticRef;
18use kernel::ErrorCode;
19
/// Progress of an in-flight SPI transfer, as reported by the
/// internal transfer-continuation logic.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SpiHostStatus {
    /// All bytes of the requested transfer have been written to the TX FIFO.
    SpiTransferCmplt,
    /// More bytes remain; the transfer continues from a saved offset on the
    /// next TXEMPTY event.
    SpiTransferInprog,
}
25
// MMIO register map of the SPI_HOST IP (offsets relative to the peripheral base).
register_structs! {
    pub SpiHostRegisters {
        //SPI: Interrupt State Register, type rw1c (write 1 to clear a pending bit)
        (0x000 => intr_state: ReadWrite<u32, intr::Register>),
        //SPI: Interrupt Enable Register
        (0x004 => intr_enable: ReadWrite<u32, intr::Register>),
        //SPI: Interrupt Test Register (write 1 to force the corresponding IRQ)
        (0x008 => intr_test: WriteOnly<u32, intr::Register>),
        //SPI: Alert Test Register
        (0x00c => alert_test: WriteOnly<u32, alert_test::Register>),
        //SPI: Control register (FIFO watermarks, output enable, soft reset, enable)
        (0x010 => ctrl: ReadWrite<u32, ctrl::Register>),
        //SPI: Status register (FIFO depths and state flags)
        (0x014 => status: ReadOnly<u32, status::Register>),
        //SPI: Configuration options register (clock divider, CS timing, CPOL/CPHA).
        (0x018 => config_opts: ReadWrite<u32, conf_opts::Register>),
        //SPI: Chip-Select ID; indexes the CONFIGOPTS multi-register
        (0x01c => csid: ReadWrite<u32, csid_ctrl::Register>),
        //SPI: Command Register (segment length/speed/direction; writing starts a segment)
        (0x020 => command: WriteOnly<u32, command::Register>),
        //SPI: Received Data window; each read returns one 32-bit word
        (0x024 => rx_data: ReadWrite<u32, rx_data::Register>),
        //SPI: Transmit Data window; each write enqueues one 32-bit word
        (0x028 => tx_data: WriteOnly<u32, tx_data::Register>),
        //SPI: Controls which classes of errors raise an interrupt.
        (0x02c => err_en: ReadWrite<u32, err_en::Register>),
        //SPI: Indicates that any errors that have occurred, type rw1c
        (0x030 => err_status: ReadWrite<u32, err_status::Register>),
        //SPI: Controls which classes of SPI events raise an interrupt
        (0x034 => event_en: ReadWrite<u32, event_en::Register>),
        (0x38 => @END),
    }
}
59
// Bitfield layouts for the registers declared above.
register_bitfields![u32,
    // Shared layout for intr_state / intr_enable / intr_test.
    intr [
        ERROR OFFSET(0) NUMBITS(1) [],
        SPI_EVENT OFFSET(1) NUMBITS(1) [],
    ],
    alert_test [
        // NOTE(review): almost certainly a typo of FATAL_FAULT. Renaming
        // would change the generated public field name, so it is left as-is
        // here; fix alongside any external users.
        FETAL_FAULT OFFSET(0) NUMBITS(1) [],
    ],
    ctrl [
        RX_WATERMARK OFFSET(0) NUMBITS(8) [],
        TX_WATERMARK OFFSET(8) NUMBITS(8) [],
        //28:16 RESERVED
        OUTPUT_EN OFFSET(29) NUMBITS(1) [],
        SW_RST OFFSET(30) NUMBITS(1) [],
        SPIEN OFFSET(31) NUMBITS(1) []
    ],
    status [
        // Number of 32-bit words currently in the TX FIFO.
        TXQD OFFSET(0) NUMBITS(8) [],
        // Number of 32-bit words currently in the RX FIFO.
        // NOTE(review): an 8-bit field at offset 15 overlaps CMDQD at
        // offset 16 — verify these offsets against the OpenTitan SPI_HOST
        // register map before relying on RXQD reads.
        RXQD OFFSET(15) NUMBITS(8) [],
        CMDQD OFFSET(16) NUMBITS(1) [],
        RXWM OFFSET(20) NUMBITS(1) [],
        BYTEORDER OFFSET(22) NUMBITS(1) [],
        RXSTALL OFFSET(23) NUMBITS(1) [],
        RXEMPTY OFFSET(24) NUMBITS(1) [],
        RXFULL OFFSET(25) NUMBITS(1) [],
        TXWM OFFSET(26) NUMBITS(1) [],
        TXSTALL OFFSET(27) NUMBITS(1) [],
        TXEMPTY OFFSET(28) NUMBITS(1) [],
        TXFULL OFFSET(29) NUMBITS(1) [],
        ACTIVE OFFSET(30) NUMBITS(1) [],
        READY OFFSET(31) NUMBITS(1) [],
    ],
    // Per-chip-select configuration (the `_0` suffix matches CONFIGOPTS_0).
    conf_opts [
        CLKDIV_0 OFFSET(0) NUMBITS(16) [],
        CSNIDLE_0 OFFSET(16) NUMBITS(3) [],
        CSNTRAIL_0 OFFSET(20) NUMBITS(3) [],
        CSNLEAD_0 OFFSET(24) NUMBITS(3) [],
        //28 Reserved
        FULLCYC_0 OFFSET(29) NUMBITS(1) [],
        CPHA_0 OFFSET(30) NUMBITS(1) [],
        CPOL_0 OFFSET(31) NUMBITS(1) [],
    ],
    csid_ctrl [
        CSID OFFSET(0) NUMBITS(32) [],
    ],
    command [
        // Segment length in bytes, minus CSAAT/segmenting handled by driver.
        LEN OFFSET(0) NUMBITS(8) [],
        // Chip-Select Active After Transaction: keep CS asserted between segments.
        CSAAT OFFSET(9) NUMBITS(1) [],
        SPEED OFFSET(10) NUMBITS(2) [],
        DIRECTION OFFSET(12) NUMBITS(2) [],
    ],
    rx_data [
        DATA OFFSET(0) NUMBITS(32) [],
    ],
    tx_data [
        DATA OFFSET(0) NUMBITS(32) [],
    ],
    err_en [
        CMDBUSY OFFSET(0) NUMBITS(1) [],
        OVERFLOW OFFSET(1) NUMBITS(1) [],
        UNDERFLOW OFFSET(2) NUMBITS(1) [],
        CMDINVAL OFFSET(3) NUMBITS(1) [],
        CSIDINVAL OFFSET(4) NUMBITS(1) [],
    ],
    // rw1c: write 1 to a bit to clear the corresponding error.
    err_status [
        CMDBUSY OFFSET(0) NUMBITS(1) [],
        OVERFLOW OFFSET(1) NUMBITS(1) [],
        UNDERFLOW OFFSET(2) NUMBITS(1) [],
        CMDINVAL OFFSET(3) NUMBITS(1) [],
        CSIDINVAL OFFSET(4) NUMBITS(1) [],
        ACCESSINVAL OFFSET(5) NUMBITS(1) [],
    ],
    event_en [
        RXFULL OFFSET(0) NUMBITS(1) [],
        TXEMPTY OFFSET(1) NUMBITS(1) [],
        RXWM OFFSET(2) NUMBITS(1) [],
        TXWM OFFSET(3) NUMBITS(1) [],
        READY OFFSET(4) NUMBITS(1) [],
        IDLE OFFSET(5) NUMBITS(1) [],
    ],
];
141
/// Driver state for one SPI_HOST peripheral instance.
pub struct SpiHost<'a> {
    // MMIO register block of the SPI_HOST IP.
    registers: StaticRef<SpiHostRegisters>,
    // Client notified via `read_write_done()` when a transfer completes or fails.
    client: OptionalCell<&'a dyn hil::spi::SpiMasterClient>,
    // True while a transfer is in flight (set in `read_write_bytes()`).
    busy: Cell<bool>,
    // Core clock frequency in Hz; used to derive the SCK clock divider.
    cpu_clk: u32,
    // Last SCK rate (Hz) accepted by `set_rate()`, reported by `get_rate()`.
    tsclk: Cell<u32>,
    // Transfer buffers held across interrupt-driven transfer segments.
    tx_buf: MapCell<SubSliceMut<'static, u8>>,
    rx_buf: MapCell<SubSliceMut<'static, u8>>,
    // Total bytes to transmit / receive in the current transfer.
    tx_len: Cell<usize>,
    rx_len: Cell<usize>,
    // Progress offsets into tx_buf / rx_buf.
    tx_offset: Cell<usize>,
    rx_offset: Cell<usize>,
}
// SPI Host Command Direction: Bidirectional (both TX and RX active in the
// segment; written to command::DIRECTION).
const SPI_HOST_CMD_BIDIRECTIONAL: u32 = 3;
// SPI Host Command Speed: Standard SPI (written to command::SPEED).
const SPI_HOST_CMD_STANDARD_SPI: u32 = 0;
159
160impl SpiHost<'_> {
161    pub fn new(base: StaticRef<SpiHostRegisters>, cpu_clk: u32) -> Self {
162        SpiHost {
163            registers: base,
164            client: OptionalCell::empty(),
165            busy: Cell::new(false),
166            cpu_clk,
167            tsclk: Cell::new(0),
168            tx_buf: MapCell::empty(),
169            rx_buf: MapCell::empty(),
170            tx_len: Cell::new(0),
171            rx_len: Cell::new(0),
172            tx_offset: Cell::new(0),
173            rx_offset: Cell::new(0),
174        }
175    }
176
177    pub fn handle_interrupt(&self) {
178        let regs = self.registers;
179        let irq = regs.intr_state.extract();
180        self.disable_interrupts();
181
182        if irq.is_set(intr::ERROR) {
183            //Clear all pending errors.
184            self.clear_err_interrupt();
185            //Something went wrong, reset IP and clear buffers
186            self.reset_spi_ip();
187            self.reset_internal_state();
188            //r/w_done() may call r/w_bytes() to re-attempt transfer
189            self.client.map(|client| match self.tx_buf.take() {
190                None => (),
191                Some(tx_buf) => {
192                    client.read_write_done(tx_buf, self.rx_buf.take(), Err(ErrorCode::FAIL))
193                }
194            });
195            return;
196        }
197
198        if irq.is_set(intr::SPI_EVENT) {
199            let status = regs.status.extract();
200            self.clear_event_interrupt();
201
202            //This could be set at init, so only follow through
203            //once a transfer has started (is_busy())
204            if status.is_set(status::TXEMPTY) && self.is_busy() {
205                match self.continue_transfer() {
206                    Ok(SpiHostStatus::SpiTransferCmplt) => {
207                        // Transfer success
208                        self.client.map(|client| match self.tx_buf.take() {
209                            None => (),
210                            Some(tx_buf) => client.read_write_done(
211                                tx_buf,
212                                self.rx_buf.take(),
213                                Ok(self.tx_len.get()),
214                            ),
215                        });
216
217                        self.disable_tx_interrupt();
218                        self.reset_internal_state();
219                    }
220                    Ok(SpiHostStatus::SpiTransferInprog) => {}
221                    Err(err) => {
222                        //Transfer failed, lets clean up
223                        //Clear all pending interrupts.
224                        self.clear_err_interrupt();
225                        //Something went wrong, reset IP and clear buffers
226                        self.reset_spi_ip();
227                        self.reset_internal_state();
228                        self.client.map(|client| match self.tx_buf.take() {
229                            None => (),
230                            Some(tx_buf) => {
231                                client.read_write_done(tx_buf, self.rx_buf.take(), Err(err))
232                            }
233                        });
234                    }
235                }
236            } else {
237                self.enable_interrupts();
238            }
239        }
240    }
241
242    //Determine if transfer complete or if we need to keep
243    //writing from an offset.
244    fn continue_transfer(&self) -> Result<SpiHostStatus, ErrorCode> {
245        let rc = self
246            .rx_buf
247            .take()
248            .map(|mut rx_buf| -> Result<SpiHostStatus, ErrorCode> {
249                let regs = self.registers;
250                let mut val32: u32;
251                let mut val8: u8;
252                let mut shift_mask;
253                let rx_len = self.tx_offset.get() - self.rx_offset.get();
254                let read_cycles = self.div_up(rx_len, 4);
255
256                //Receive rx_data (Only 4byte reads are supported)
257                for _n in 0..read_cycles {
258                    val32 = regs.rx_data.read(rx_data::DATA);
259                    shift_mask = 0xFF;
260                    for i in 0..4 {
261                        if self.rx_offset.get() >= self.rx_len.get() {
262                            break;
263                        }
264                        val8 = ((val32 & shift_mask) >> (i * 8)) as u8;
265                        if let Some(ptr) = rx_buf.as_slice().get_mut(self.rx_offset.get()) {
266                            *ptr = val8;
267                        } else {
268                            // We have run out of rx buffer size
269                            break;
270                        }
271                        self.rx_offset.set(self.rx_offset.get() + 1);
272                        shift_mask <<= 8;
273                    }
274                }
275                //Save buffer!
276                self.rx_buf.replace(rx_buf);
277                //Transfer was complete */
278                if self.tx_offset.get() == self.tx_len.get() {
279                    Ok(SpiHostStatus::SpiTransferCmplt)
280                } else {
281                    //Theres more to transfer, continue writing from the offset
282                    self.spi_transfer_progress()
283                }
284            })
285            .map_or_else(|| Err(ErrorCode::FAIL), |rc| rc);
286
287        rc
288    }
289
290    /// Continue SPI transfer from offset point
291    fn spi_transfer_progress(&self) -> Result<SpiHostStatus, ErrorCode> {
292        let mut transfer_complete = false;
293        if self
294            .tx_buf
295            .take()
296            .map(|mut tx_buf| -> Result<(), ErrorCode> {
297                let regs = self.registers;
298                let mut t_byte: u32;
299                let mut tx_slice: [u8; 4];
300
301                if regs.status.read(status::TXQD) != 0 || regs.status.read(status::ACTIVE) != 0 {
302                    self.tx_buf.replace(tx_buf);
303                    return Err(ErrorCode::BUSY);
304                }
305
306                while !regs.status.is_set(status::TXFULL) && regs.status.read(status::TXQD) < 64 {
307                    tx_slice = [0, 0, 0, 0];
308                    for elem in tx_slice.iter_mut() {
309                        if self.tx_offset.get() >= self.tx_len.get() {
310                            break;
311                        }
312                        if let Some(val) = tx_buf.as_slice().get(self.tx_offset.get()) {
313                            *elem = *val;
314                            self.tx_offset.set(self.tx_offset.get() + 1);
315                        } else {
316                            //Unexpectedly ran out of tx buffer
317                            break;
318                        }
319                    }
320                    t_byte = u32::from_le_bytes(tx_slice);
321                    regs.tx_data.write(tx_data::DATA.val(t_byte));
322
323                    //Transfer Complete in one-shot
324                    if self.tx_offset.get() >= self.tx_len.get() {
325                        transfer_complete = true;
326                        break;
327                    }
328                }
329
330                //Hold tx_buf for offset transfer continue
331                self.tx_buf.replace(tx_buf);
332
333                //Set command register to init transfer
334                self.start_transceive();
335                Ok(())
336            })
337            .transpose()
338            .is_err()
339        {
340            return Err(ErrorCode::BUSY);
341        }
342
343        if transfer_complete {
344            Ok(SpiHostStatus::SpiTransferCmplt)
345        } else {
346            Ok(SpiHostStatus::SpiTransferInprog)
347        }
348    }
349
350    /// Issue a command to start SPI transaction
351    /// Currently only Bi-Directional transactions are supported
352    fn start_transceive(&self) {
353        let regs = self.registers;
354        //TXQD holds number of 32bit words
355        let txfifo_num_bytes = regs.status.read(status::TXQD) * 4;
356
357        //8-bits that describe command transfer len (cannot exceed 255)
358        let num_transfer_bytes: u32 = if txfifo_num_bytes > u8::MAX as u32 {
359            u8::MAX as u32
360        } else {
361            txfifo_num_bytes
362        };
363
364        self.enable_interrupts();
365        self.enable_tx_interrupt();
366
367        //Flush all data in TXFIFO and assert CSAAT for all
368        // but the last transfer segment.
369        if self.tx_offset.get() >= self.tx_len.get() {
370            regs.command.write(
371                command::LEN.val(num_transfer_bytes)
372                    + command::DIRECTION.val(SPI_HOST_CMD_BIDIRECTIONAL)
373                    + command::CSAAT::CLEAR
374                    + command::SPEED.val(SPI_HOST_CMD_STANDARD_SPI),
375            );
376        } else {
377            regs.command.write(
378                command::LEN.val(num_transfer_bytes)
379                    + command::DIRECTION.val(SPI_HOST_CMD_BIDIRECTIONAL)
380                    + command::CSAAT::SET
381                    + command::SPEED.val(SPI_HOST_CMD_STANDARD_SPI),
382            );
383        }
384    }
385
386    /// Reset the soft internal state, should be called once
387    /// a spi transaction has been completed.
388    fn reset_internal_state(&self) {
389        self.clear_spi_busy();
390        self.tx_len.set(0);
391        self.rx_len.set(0);
392        self.tx_offset.set(0);
393        self.rx_offset.set(0);
394
395        debug_assert!(self.tx_buf.is_none());
396        debug_assert!(self.rx_buf.is_none());
397    }
398
399    /// Enable SPI_HOST IP
400    /// `dead_code` to silence warnings when not building for mainline qemu
401    #[allow(dead_code)]
402    fn enable_spi_host(&self) {
403        let regs = self.registers;
404        //Enables the SPI host
405        regs.ctrl.modify(ctrl::SPIEN::SET + ctrl::OUTPUT_EN::SET);
406    }
407
408    /// Reset SPI Host
409    fn reset_spi_ip(&self) {
410        let regs = self.registers;
411        //IP to reset state
412        regs.ctrl.modify(ctrl::SW_RST::SET);
413
414        //Wait for status ready to be set before continuing
415        while regs.status.is_set(status::ACTIVE) {}
416        //Wait for both FIFOs to completely drain
417        while regs.status.read(status::TXQD) != 0 && regs.status.read(status::RXQD) != 0 {}
418        //Clear Reset
419        regs.ctrl.modify(ctrl::SW_RST::CLEAR);
420    }
421
422    /// Enable both event/err IRQ
423    fn enable_interrupts(&self) {
424        self.registers
425            .intr_state
426            .write(intr::ERROR::SET + intr::SPI_EVENT::SET);
427        self.registers
428            .intr_enable
429            .modify(intr::ERROR::SET + intr::SPI_EVENT::SET);
430    }
431
432    /// Disable both event/err IRQ
433    fn disable_interrupts(&self) {
434        let regs = self.registers;
435        regs.intr_enable
436            .write(intr::ERROR::CLEAR + intr::SPI_EVENT::CLEAR);
437    }
438
439    /// Clear the error IRQ
440    fn clear_err_interrupt(&self) {
441        let regs = self.registers;
442        //Clear Error Masks (rw1c)
443        regs.err_status.modify(err_status::CMDBUSY::SET);
444        regs.err_status.modify(err_status::OVERFLOW::SET);
445        regs.err_status.modify(err_status::UNDERFLOW::SET);
446        regs.err_status.modify(err_status::CMDINVAL::SET);
447        regs.err_status.modify(err_status::CSIDINVAL::SET);
448        regs.err_status.modify(err_status::ACCESSINVAL::SET);
449        //Clear Error IRQ
450        regs.intr_state.modify(intr::ERROR::SET);
451    }
452
453    /// Clear the event IRQ
454    fn clear_event_interrupt(&self) {
455        let regs = self.registers;
456        regs.intr_state.modify(intr::SPI_EVENT::SET);
457    }
458    /// Will generate a `test` interrupt on the error irq
459    /// Note: Left to allow debug accessibility
460    #[allow(dead_code)]
461    fn test_error_interrupt(&self) {
462        let regs = self.registers;
463        regs.intr_test.write(intr::ERROR::SET);
464    }
465    /// Clear test interrupts
466    /// Note: Left to allow debug accessibility
467    #[allow(dead_code)]
468    fn clear_tests(&self) {
469        let regs = self.registers;
470        regs.intr_test
471            .write(intr::ERROR::CLEAR + intr::SPI_EVENT::CLEAR);
472    }
473
474    /// Will generate a `test` interrupt on the event irq
475    /// Note: Left to allow debug accessibility
476    #[allow(dead_code)]
477    fn test_event_interrupt(&self) {
478        let regs = self.registers;
479        regs.intr_test.write(intr::SPI_EVENT::SET);
480    }
481
482    /// Enable required `event interrupts`
483    /// `dead_code` to silence warnings when not building for mainline qemu
484    #[allow(dead_code)]
485    fn event_enable(&self) {
486        let regs = self.registers;
487        regs.event_en.write(event_en::TXEMPTY::SET);
488    }
489
490    fn disable_tx_interrupt(&self) {
491        let regs = self.registers;
492        regs.event_en.modify(event_en::TXEMPTY::CLEAR);
493    }
494
495    fn enable_tx_interrupt(&self) {
496        let regs = self.registers;
497        regs.event_en.modify(event_en::TXEMPTY::SET);
498    }
499
500    /// Enable required error interrupts
501    /// `dead_code` to silence warnings when not building for mainline qemu
502    #[allow(dead_code)]
503    fn err_enable(&self) {
504        let regs = self.registers;
505        regs.err_en.modify(
506            err_en::CMDBUSY::SET
507                + err_en::CMDINVAL::SET
508                + err_en::CSIDINVAL::SET
509                + err_en::OVERFLOW::SET
510                + err_en::UNDERFLOW::SET,
511        );
512    }
513
514    fn set_spi_busy(&self) {
515        self.busy.set(true);
516    }
517
518    fn clear_spi_busy(&self) {
519        self.busy.set(false);
520    }
521
522    /// Divide a/b and return a value always rounded
523    /// up to the nearest integer
524    fn div_up(&self, a: usize, b: usize) -> usize {
525        a.div_ceil(b)
526    }
527
528    /// Calculate the scaler based on a specified tsclk rate
529    /// This scaler will pre-scale the cpu_clk and must be <= cpu_clk/2
530    fn calculate_tsck_scaler(&self, rate: u32) -> Result<u16, ErrorCode> {
531        if rate > self.cpu_clk / 2 {
532            return Err(ErrorCode::NOSUPPORT);
533        }
534        //Divide and truncate
535        let mut scaler: u32 = (self.cpu_clk / (2 * rate)) - 1;
536
537        //Increase scaler if the division was not exact, ensuring that it does not overflow
538        //or exceed divider specification where tsck is at most <= Tclk/2
539        if self.cpu_clk % (2 * rate) != 0 && scaler != 0xFF {
540            scaler += 1;
541        }
542        Ok(scaler as u16)
543    }
544}
545
/// Chip-select index newtype: the raw CSID value written to the CSID
/// register, used as this driver's `ChipSelect` handle.
#[derive(Copy, Clone)]
pub struct CS(pub u32);
548
// A `CS` already carries the raw CSID index, so conversion into an
// active-low chip select is the identity.
impl hil::spi::cs::IntoChipSelect<CS, hil::spi::cs::ActiveLow> for CS {
    fn into_cs(self) -> CS {
        self
    }
}
554
impl<'a> hil::spi::SpiMaster<'a> for SpiHost<'a> {
    type ChipSelect = CS;

    /// One-time hardware init: enable event/error sources and IRQs, then
    /// turn the SPI host on.
    fn init(&self) -> Result<(), ErrorCode> {
        let regs = self.registers;
        self.event_enable();
        self.err_enable();

        self.enable_interrupts();

        self.enable_spi_host();

        //TODO: I think this is bug in OT, where the `first` word written
        // (while TXEMPTY) to TX_DATA is dropped/ignored and not added to TX_FIFO (TXQD = 0).
        // The following write (0x00), works around this `bug`.
        // Could be Verilator specific
        regs.tx_data.write(tx_data::DATA.val(0x00));
        assert_eq!(regs.status.read(status::TXQD), 0);
        Ok(())
    }

    fn set_client(&self, client: &'a dyn hil::spi::SpiMasterClient) {
        self.client.set(client);
    }

    fn is_busy(&self) -> bool {
        self.busy.get()
    }

    /// Start an interrupt-driven bidirectional transfer.
    ///
    /// `tx_buf.len()` bytes are transmitted; up to `min(tx_len, rx_len)`
    /// bytes are captured into `rx_buf`. Returns `BUSY` if a transfer is in
    /// flight and `NOMEM` if no rx buffer is supplied (this driver only
    /// supports bidirectional segments). Completion is signaled via the
    /// client's `read_write_done()`.
    fn read_write_bytes(
        &self,
        tx_buf: SubSliceMut<'static, u8>,
        rx_buf: Option<SubSliceMut<'static, u8>>,
    ) -> Result<
        (),
        (
            ErrorCode,
            SubSliceMut<'static, u8>,
            Option<SubSliceMut<'static, u8>>,
        ),
    > {
        debug_assert!(!self.busy.get());
        debug_assert!(self.tx_buf.is_none());
        debug_assert!(self.rx_buf.is_none());
        let regs = self.registers;

        if self.is_busy() || regs.status.is_set(status::TXFULL) {
            return Err((ErrorCode::BUSY, tx_buf, rx_buf));
        }

        // Bidirectional-only: an rx buffer is required.
        if rx_buf.is_none() {
            return Err((ErrorCode::NOMEM, tx_buf, rx_buf));
        }

        self.tx_len.set(tx_buf.len());

        let mut t_byte: u32;
        let mut tx_slice: [u8; 4];
        //We are committing to the transfer now
        self.set_spi_busy();

        // Fill the TX FIFO (64 words deep) with little-endian packed words
        // until it is full or the whole buffer has been queued.
        while !regs.status.is_set(status::TXFULL) && regs.status.read(status::TXQD) < 64 {
            tx_slice = [0, 0, 0, 0];
            for elem in tx_slice.iter_mut() {
                if self.tx_offset.get() >= self.tx_len.get() {
                    break;
                }
                *elem = tx_buf[self.tx_offset.get()];
                self.tx_offset.set(self.tx_offset.get() + 1);
            }
            t_byte = u32::from_le_bytes(tx_slice);
            regs.tx_data.write(tx_data::DATA.val(t_byte));

            //Transfer Complete in one-shot
            if self.tx_offset.get() >= self.tx_len.get() {
                break;
            }
        }

        //Hold tx_buf for offset transfer continue
        self.tx_buf.replace(tx_buf);

        //Hold rx_buf for later; cap rx_len at the smaller of the two buffers.

        rx_buf.map(|rx_buf_t| {
            self.rx_len.set(cmp::min(self.tx_len.get(), rx_buf_t.len()));
            self.rx_buf.replace(rx_buf_t);
        });

        //Set command register to init transfer
        self.start_transceive();

        Ok(())
    }

    /// Single-byte writes are not supported by this driver.
    fn write_byte(&self, _val: u8) -> Result<(), ErrorCode> {
        //Use `read_write_bytes()` instead.
        Err(ErrorCode::FAIL)
    }

    /// Single-byte reads are not supported by this driver.
    fn read_byte(&self) -> Result<u8, ErrorCode> {
        //Use `read_write_bytes()` instead.
        Err(ErrorCode::FAIL)
    }

    /// Single-byte exchanges are not supported by this driver.
    fn read_write_byte(&self, _val: u8) -> Result<u8, ErrorCode> {
        //Use `read_write_bytes()` instead.
        Err(ErrorCode::FAIL)
    }

    fn specify_chip_select(&self, cs: Self::ChipSelect) -> Result<(), ErrorCode> {
        let regs = self.registers;

        //CSID will index the CONFIGOPTS multi-register
        regs.csid.write(csid_ctrl::CSID.val(cs.0));

        Ok(())
    }

    /// Program the SCK divider for `rate` Hz; returns the accepted rate or
    /// `NOSUPPORT` if `rate` exceeds cpu_clk/2.
    fn set_rate(&self, rate: u32) -> Result<u32, ErrorCode> {
        let regs = self.registers;

        match self.calculate_tsck_scaler(rate) {
            Ok(scaler) => {
                regs.config_opts
                    .modify(conf_opts::CLKDIV_0.val(scaler as u32));
                self.tsclk.set(rate);
                Ok(rate)
            }
            Err(e) => Err(e),
        }
    }

    fn get_rate(&self) -> u32 {
        self.tsclk.get()
    }

    fn set_polarity(&self, polarity: ClockPolarity) -> Result<(), ErrorCode> {
        let regs = self.registers;
        match polarity {
            ClockPolarity::IdleLow => regs.config_opts.modify(conf_opts::CPOL_0::CLEAR),
            ClockPolarity::IdleHigh => regs.config_opts.modify(conf_opts::CPOL_0::SET),
        }
        Ok(())
    }

    fn get_polarity(&self) -> ClockPolarity {
        let regs = self.registers;

        // CPOL_0 is a single bit, so only 0/1 are possible.
        match regs.config_opts.read(conf_opts::CPOL_0) {
            0 => ClockPolarity::IdleLow,
            1 => ClockPolarity::IdleHigh,
            _ => unreachable!(),
        }
    }

    fn set_phase(&self, phase: ClockPhase) -> Result<(), ErrorCode> {
        let regs = self.registers;
        match phase {
            ClockPhase::SampleLeading => regs.config_opts.modify(conf_opts::CPHA_0::CLEAR),
            ClockPhase::SampleTrailing => regs.config_opts.modify(conf_opts::CPHA_0::SET),
        }
        Ok(())
    }

    fn get_phase(&self) -> ClockPhase {
        let regs = self.registers;

        // CPHA_0 is a single bit, so only 0/1 are possible.
        match regs.config_opts.read(conf_opts::CPHA_0) {
            1 => ClockPhase::SampleTrailing,
            0 => ClockPhase::SampleLeading,
            _ => unreachable!(),
        }
    }

    /// hold_low is controlled by IP based on command segments issued
    /// force holds are not supported
    fn hold_low(&self) {
        unimplemented!("spi_host: does not support hold low");
    }

    /// release_low is controlled by IP based on command segments issued
    /// force releases are not supported
    fn release_low(&self) {
        unimplemented!("spi_host: does not support release low");
    }
}