1use core::cell::Cell;
7use core::cmp;
8use kernel::hil;
9use kernel::hil::spi::SpiMaster;
10use kernel::hil::spi::{ClockPhase, ClockPolarity};
11use kernel::utilities::cells::{MapCell, OptionalCell};
12use kernel::utilities::leasable_buffer::SubSliceMut;
13use kernel::utilities::registers::interfaces::{ReadWriteable, Readable, Writeable};
14use kernel::utilities::registers::{
15 register_bitfields, register_structs, ReadOnly, ReadWrite, WriteOnly,
16};
17use kernel::utilities::StaticRef;
18use kernel::ErrorCode;
19
/// Progress of an in-flight SPI transaction, as reported by the internal
/// transfer state machine (`continue_transfer`/`spi_transfer_progress`).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum SpiHostStatus {
    /// The entire TX buffer has been shifted out; the transfer is finished.
    SpiTransferCmplt,
    /// More TX data remains; another FIFO refill/command segment was issued.
    SpiTransferInprog,
}
25
register_structs! {
    // Memory-mapped register layout of the SPI_HOST peripheral.
    // Offsets are relative to the peripheral base address.
    pub SpiHostRegisters {
        // Latched interrupt flags (write-1-to-clear, see clear_*_interrupt()).
        (0x000 => intr_state: ReadWrite<u32, intr::Register>),
        // Per-interrupt enable mask.
        (0x004 => intr_enable: ReadWrite<u32, intr::Register>),
        // Software-triggered test interrupts (used by test_*_interrupt()).
        (0x008 => intr_test: WriteOnly<u32, intr::Register>),
        // Alert test trigger.
        (0x00c => alert_test: WriteOnly<u32, alert_test::Register>),
        // Block control: enable, output enable, soft reset, FIFO watermarks.
        (0x010 => ctrl: ReadWrite<u32, ctrl::Register>),
        // FIFO depths and state machine status (read-only).
        (0x014 => status: ReadOnly<u32, status::Register>),
        // Per-chip-select timing/clocking configuration.
        (0x018 => config_opts: ReadWrite<u32, conf_opts::Register>),
        // Which chip select the next command targets.
        (0x01c => csid: ReadWrite<u32, csid_ctrl::Register>),
        // Command segment descriptor: length, direction, speed, CSAAT.
        (0x020 => command: WriteOnly<u32, command::Register>),
        // RX FIFO read window (32-bit words).
        (0x024 => rx_data: ReadWrite<u32, rx_data::Register>),
        // TX FIFO write window (32-bit words).
        (0x028 => tx_data: WriteOnly<u32, tx_data::Register>),
        // Error-interrupt enable mask.
        (0x02c => err_en: ReadWrite<u32, err_en::Register>),
        // Latched error conditions (write-1-to-clear).
        (0x030 => err_status: ReadWrite<u32, err_status::Register>),
        // Which SPI events raise the SPI_EVENT interrupt.
        (0x034 => event_en: ReadWrite<u32, event_en::Register>),
        (0x38 => @END),
    }
}
59
register_bitfields![u32,
    // Interrupt state/enable/test bits (shared layout across the three
    // intr_* registers).
    intr [
        ERROR OFFSET(0) NUMBITS(1) [],
        SPI_EVENT OFFSET(1) NUMBITS(1) [],
    ],
    alert_test [
        // NOTE(review): "FETAL" is presumably a typo for FATAL; name kept
        // as-is in case other files reference it.
        FETAL_FAULT OFFSET(0) NUMBITS(1) [],
    ],
    ctrl [
        RX_WATERMARK OFFSET(0) NUMBITS(8) [],
        TX_WATERMARK OFFSET(8) NUMBITS(8) [],
        OUTPUT_EN OFFSET(29) NUMBITS(1) [],
        SW_RST OFFSET(30) NUMBITS(1) [],
        SPIEN OFFSET(31) NUMBITS(1) []
    ],
    status [
        // TXQD/RXQD count queued 32-bit FIFO words (see start_transceive()).
        TXQD OFFSET(0) NUMBITS(8) [],
        // NOTE(review): RXQD at bits 15..22 overlaps CMDQD (16) and
        // RXWM (20) as declared here — verify these offsets against the
        // SPI_HOST register specification.
        RXQD OFFSET(15) NUMBITS(8) [],
        CMDQD OFFSET(16) NUMBITS(1) [],
        RXWM OFFSET(20) NUMBITS(1) [],
        BYTEORDER OFFSET(22) NUMBITS(1) [],
        RXSTALL OFFSET(23) NUMBITS(1) [],
        RXEMPTY OFFSET(24) NUMBITS(1) [],
        RXFULL OFFSET(25) NUMBITS(1) [],
        TXWM OFFSET(26) NUMBITS(1) [],
        TXSTALL OFFSET(27) NUMBITS(1) [],
        TXEMPTY OFFSET(28) NUMBITS(1) [],
        TXFULL OFFSET(29) NUMBITS(1) [],
        ACTIVE OFFSET(30) NUMBITS(1) [],
        READY OFFSET(31) NUMBITS(1) [],
    ],
    // Per-CS configuration; the _0 suffix denotes chip-select slot 0.
    conf_opts [
        // 16-bit SCK divider (see calculate_tsck_scaler()).
        CLKDIV_0 OFFSET(0) NUMBITS(16) [],
        CSNIDLE_0 OFFSET(16) NUMBITS(3) [],
        CSNTRAIL_0 OFFSET(20) NUMBITS(3) [],
        CSNLEAD_0 OFFSET(24) NUMBITS(3) [],
        FULLCYC_0 OFFSET(29) NUMBITS(1) [],
        CPHA_0 OFFSET(30) NUMBITS(1) [],
        CPOL_0 OFFSET(31) NUMBITS(1) [],
    ],
    csid_ctrl [
        CSID OFFSET(0) NUMBITS(32) [],
    ],
    command [
        // Segment length in bytes (8-bit field; callers cap at u8::MAX).
        LEN OFFSET(0) NUMBITS(8) [],
        // Chip Select Active After Transaction: hold CS between segments.
        CSAAT OFFSET(9) NUMBITS(1) [],
        SPEED OFFSET(10) NUMBITS(2) [],
        DIRECTION OFFSET(12) NUMBITS(2) [],
    ],
    rx_data [
        DATA OFFSET(0) NUMBITS(32) [],
    ],
    tx_data [
        DATA OFFSET(0) NUMBITS(32) [],
    ],
    err_en [
        CMDBUSY OFFSET(0) NUMBITS(1) [],
        OVERFLOW OFFSET(1) NUMBITS(1) [],
        UNDERFLOW OFFSET(2) NUMBITS(1) [],
        CMDINVAL OFFSET(3) NUMBITS(1) [],
        CSIDINVAL OFFSET(4) NUMBITS(1) [],
    ],
    // Write-1-to-clear error flags (see clear_err_interrupt()).
    err_status [
        CMDBUSY OFFSET(0) NUMBITS(1) [],
        OVERFLOW OFFSET(1) NUMBITS(1) [],
        UNDERFLOW OFFSET(2) NUMBITS(1) [],
        CMDINVAL OFFSET(3) NUMBITS(1) [],
        CSIDINVAL OFFSET(4) NUMBITS(1) [],
        ACCESSINVAL OFFSET(5) NUMBITS(1) [],
    ],
    event_en [
        RXFULL OFFSET(0) NUMBITS(1) [],
        TXEMPTY OFFSET(1) NUMBITS(1) [],
        RXWM OFFSET(2) NUMBITS(1) [],
        TXWM OFFSET(3) NUMBITS(1) [],
        READY OFFSET(4) NUMBITS(1) [],
        IDLE OFFSET(5) NUMBITS(1) [],
    ],
];
141
/// Driver state for one SPI_HOST peripheral instance.
pub struct SpiHost<'a> {
    /// Memory-mapped register block.
    registers: StaticRef<SpiHostRegisters>,
    /// Client notified via `read_write_done()` when a transfer finishes.
    client: OptionalCell<&'a dyn hil::spi::SpiMasterClient>,
    /// True while a transfer is in flight (see `is_busy()`).
    busy: Cell<bool>,
    /// Input clock frequency in Hz, used to derive the SCK divider.
    cpu_clk: u32,
    /// Last SCK rate (Hz) accepted by `set_rate()`.
    tsclk: Cell<u32>,
    /// TX buffer held for the duration of a transfer.
    tx_buf: MapCell<SubSliceMut<'static, u8>>,
    /// RX buffer held for the duration of a transfer.
    rx_buf: MapCell<SubSliceMut<'static, u8>>,
    /// Total bytes to transmit in the current transfer.
    tx_len: Cell<usize>,
    /// Total bytes to capture (min of tx_len and RX buffer length).
    rx_len: Cell<usize>,
    /// Next index into `tx_buf` to be queued.
    tx_offset: Cell<usize>,
    /// Next index into `rx_buf` to be filled.
    rx_offset: Cell<usize>,
}
// COMMAND.DIRECTION encoding for a full-duplex (simultaneous TX and RX)
// segment.
const SPI_HOST_CMD_BIDIRECTIONAL: u32 = 3;
// COMMAND.SPEED encoding for standard single-lane SPI.
const SPI_HOST_CMD_STANDARD_SPI: u32 = 0;
159
160impl SpiHost<'_> {
161 pub fn new(base: StaticRef<SpiHostRegisters>, cpu_clk: u32) -> Self {
162 SpiHost {
163 registers: base,
164 client: OptionalCell::empty(),
165 busy: Cell::new(false),
166 cpu_clk,
167 tsclk: Cell::new(0),
168 tx_buf: MapCell::empty(),
169 rx_buf: MapCell::empty(),
170 tx_len: Cell::new(0),
171 rx_len: Cell::new(0),
172 tx_offset: Cell::new(0),
173 rx_offset: Cell::new(0),
174 }
175 }
176
177 pub fn handle_interrupt(&self) {
178 let regs = self.registers;
179 let irq = regs.intr_state.extract();
180 self.disable_interrupts();
181
182 if irq.is_set(intr::ERROR) {
183 self.clear_err_interrupt();
185 self.reset_spi_ip();
187 self.reset_internal_state();
188 self.client.map(|client| match self.tx_buf.take() {
190 None => (),
191 Some(tx_buf) => {
192 client.read_write_done(tx_buf, self.rx_buf.take(), Err(ErrorCode::FAIL))
193 }
194 });
195 return;
196 }
197
198 if irq.is_set(intr::SPI_EVENT) {
199 let status = regs.status.extract();
200 self.clear_event_interrupt();
201
202 if status.is_set(status::TXEMPTY) && self.is_busy() {
205 match self.continue_transfer() {
206 Ok(SpiHostStatus::SpiTransferCmplt) => {
207 self.client.map(|client| match self.tx_buf.take() {
209 None => (),
210 Some(tx_buf) => client.read_write_done(
211 tx_buf,
212 self.rx_buf.take(),
213 Ok(self.tx_len.get()),
214 ),
215 });
216
217 self.disable_tx_interrupt();
218 self.reset_internal_state();
219 }
220 Ok(SpiHostStatus::SpiTransferInprog) => {}
221 Err(err) => {
222 self.clear_err_interrupt();
225 self.reset_spi_ip();
227 self.reset_internal_state();
228 self.client.map(|client| match self.tx_buf.take() {
229 None => (),
230 Some(tx_buf) => {
231 client.read_write_done(tx_buf, self.rx_buf.take(), Err(err))
232 }
233 });
234 }
235 }
236 } else {
237 self.enable_interrupts();
238 }
239 }
240 }
241
242 fn continue_transfer(&self) -> Result<SpiHostStatus, ErrorCode> {
245 let rc = self
246 .rx_buf
247 .take()
248 .map(|mut rx_buf| -> Result<SpiHostStatus, ErrorCode> {
249 let regs = self.registers;
250 let mut val32: u32;
251 let mut val8: u8;
252 let mut shift_mask;
253 let rx_len = self.tx_offset.get() - self.rx_offset.get();
254 let read_cycles = self.div_up(rx_len, 4);
255
256 for _n in 0..read_cycles {
258 val32 = regs.rx_data.read(rx_data::DATA);
259 shift_mask = 0xFF;
260 for i in 0..4 {
261 if self.rx_offset.get() >= self.rx_len.get() {
262 break;
263 }
264 val8 = ((val32 & shift_mask) >> (i * 8)) as u8;
265 if let Some(ptr) = rx_buf.as_slice().get_mut(self.rx_offset.get()) {
266 *ptr = val8;
267 } else {
268 break;
270 }
271 self.rx_offset.set(self.rx_offset.get() + 1);
272 shift_mask <<= 8;
273 }
274 }
275 self.rx_buf.replace(rx_buf);
277 if self.tx_offset.get() == self.tx_len.get() {
279 Ok(SpiHostStatus::SpiTransferCmplt)
280 } else {
281 self.spi_transfer_progress()
283 }
284 })
285 .map_or_else(|| Err(ErrorCode::FAIL), |rc| rc);
286
287 rc
288 }
289
290 fn spi_transfer_progress(&self) -> Result<SpiHostStatus, ErrorCode> {
292 let mut transfer_complete = false;
293 if self
294 .tx_buf
295 .take()
296 .map(|mut tx_buf| -> Result<(), ErrorCode> {
297 let regs = self.registers;
298 let mut t_byte: u32;
299 let mut tx_slice: [u8; 4];
300
301 if regs.status.read(status::TXQD) != 0 || regs.status.read(status::ACTIVE) != 0 {
302 self.tx_buf.replace(tx_buf);
303 return Err(ErrorCode::BUSY);
304 }
305
306 while !regs.status.is_set(status::TXFULL) && regs.status.read(status::TXQD) < 64 {
307 tx_slice = [0, 0, 0, 0];
308 for elem in tx_slice.iter_mut() {
309 if self.tx_offset.get() >= self.tx_len.get() {
310 break;
311 }
312 if let Some(val) = tx_buf.as_slice().get(self.tx_offset.get()) {
313 *elem = *val;
314 self.tx_offset.set(self.tx_offset.get() + 1);
315 } else {
316 break;
318 }
319 }
320 t_byte = u32::from_le_bytes(tx_slice);
321 regs.tx_data.write(tx_data::DATA.val(t_byte));
322
323 if self.tx_offset.get() >= self.tx_len.get() {
325 transfer_complete = true;
326 break;
327 }
328 }
329
330 self.tx_buf.replace(tx_buf);
332
333 self.start_transceive();
335 Ok(())
336 })
337 .transpose()
338 .is_err()
339 {
340 return Err(ErrorCode::BUSY);
341 }
342
343 if transfer_complete {
344 Ok(SpiHostStatus::SpiTransferCmplt)
345 } else {
346 Ok(SpiHostStatus::SpiTransferInprog)
347 }
348 }
349
350 fn start_transceive(&self) {
353 let regs = self.registers;
354 let txfifo_num_bytes = regs.status.read(status::TXQD) * 4;
356
357 let num_transfer_bytes: u32 = if txfifo_num_bytes > u8::MAX as u32 {
359 u8::MAX as u32
360 } else {
361 txfifo_num_bytes
362 };
363
364 self.enable_interrupts();
365 self.enable_tx_interrupt();
366
367 if self.tx_offset.get() >= self.tx_len.get() {
370 regs.command.write(
371 command::LEN.val(num_transfer_bytes)
372 + command::DIRECTION.val(SPI_HOST_CMD_BIDIRECTIONAL)
373 + command::CSAAT::CLEAR
374 + command::SPEED.val(SPI_HOST_CMD_STANDARD_SPI),
375 );
376 } else {
377 regs.command.write(
378 command::LEN.val(num_transfer_bytes)
379 + command::DIRECTION.val(SPI_HOST_CMD_BIDIRECTIONAL)
380 + command::CSAAT::SET
381 + command::SPEED.val(SPI_HOST_CMD_STANDARD_SPI),
382 );
383 }
384 }
385
386 fn reset_internal_state(&self) {
389 self.clear_spi_busy();
390 self.tx_len.set(0);
391 self.rx_len.set(0);
392 self.tx_offset.set(0);
393 self.rx_offset.set(0);
394
395 debug_assert!(self.tx_buf.is_none());
396 debug_assert!(self.rx_buf.is_none());
397 }
398
399 #[allow(dead_code)]
402 fn enable_spi_host(&self) {
403 let regs = self.registers;
404 regs.ctrl.modify(ctrl::SPIEN::SET + ctrl::OUTPUT_EN::SET);
406 }
407
408 fn reset_spi_ip(&self) {
410 let regs = self.registers;
411 regs.ctrl.modify(ctrl::SW_RST::SET);
413
414 while regs.status.is_set(status::ACTIVE) {}
416 while regs.status.read(status::TXQD) != 0 && regs.status.read(status::RXQD) != 0 {}
418 regs.ctrl.modify(ctrl::SW_RST::CLEAR);
420 }
421
422 fn enable_interrupts(&self) {
424 self.registers
425 .intr_state
426 .write(intr::ERROR::SET + intr::SPI_EVENT::SET);
427 self.registers
428 .intr_enable
429 .modify(intr::ERROR::SET + intr::SPI_EVENT::SET);
430 }
431
432 fn disable_interrupts(&self) {
434 let regs = self.registers;
435 regs.intr_enable
436 .write(intr::ERROR::CLEAR + intr::SPI_EVENT::CLEAR);
437 }
438
439 fn clear_err_interrupt(&self) {
441 let regs = self.registers;
442 regs.err_status.modify(err_status::CMDBUSY::SET);
444 regs.err_status.modify(err_status::OVERFLOW::SET);
445 regs.err_status.modify(err_status::UNDERFLOW::SET);
446 regs.err_status.modify(err_status::CMDINVAL::SET);
447 regs.err_status.modify(err_status::CSIDINVAL::SET);
448 regs.err_status.modify(err_status::ACCESSINVAL::SET);
449 regs.intr_state.modify(intr::ERROR::SET);
451 }
452
453 fn clear_event_interrupt(&self) {
455 let regs = self.registers;
456 regs.intr_state.modify(intr::SPI_EVENT::SET);
457 }
458 #[allow(dead_code)]
461 fn test_error_interrupt(&self) {
462 let regs = self.registers;
463 regs.intr_test.write(intr::ERROR::SET);
464 }
465 #[allow(dead_code)]
468 fn clear_tests(&self) {
469 let regs = self.registers;
470 regs.intr_test
471 .write(intr::ERROR::CLEAR + intr::SPI_EVENT::CLEAR);
472 }
473
474 #[allow(dead_code)]
477 fn test_event_interrupt(&self) {
478 let regs = self.registers;
479 regs.intr_test.write(intr::SPI_EVENT::SET);
480 }
481
482 #[allow(dead_code)]
485 fn event_enable(&self) {
486 let regs = self.registers;
487 regs.event_en.write(event_en::TXEMPTY::SET);
488 }
489
490 fn disable_tx_interrupt(&self) {
491 let regs = self.registers;
492 regs.event_en.modify(event_en::TXEMPTY::CLEAR);
493 }
494
495 fn enable_tx_interrupt(&self) {
496 let regs = self.registers;
497 regs.event_en.modify(event_en::TXEMPTY::SET);
498 }
499
500 #[allow(dead_code)]
503 fn err_enable(&self) {
504 let regs = self.registers;
505 regs.err_en.modify(
506 err_en::CMDBUSY::SET
507 + err_en::CMDINVAL::SET
508 + err_en::CSIDINVAL::SET
509 + err_en::OVERFLOW::SET
510 + err_en::UNDERFLOW::SET,
511 );
512 }
513
514 fn set_spi_busy(&self) {
515 self.busy.set(true);
516 }
517
518 fn clear_spi_busy(&self) {
519 self.busy.set(false);
520 }
521
522 fn div_up(&self, a: usize, b: usize) -> usize {
525 a.div_ceil(b)
526 }
527
528 fn calculate_tsck_scaler(&self, rate: u32) -> Result<u16, ErrorCode> {
531 if rate > self.cpu_clk / 2 {
532 return Err(ErrorCode::NOSUPPORT);
533 }
534 let mut scaler: u32 = (self.cpu_clk / (2 * rate)) - 1;
536
537 if self.cpu_clk % (2 * rate) != 0 && scaler != 0xFF {
540 scaler += 1;
541 }
542 Ok(scaler as u16)
543 }
544}
545
/// Chip-select identifier: the numeric ID written to the CSID register by
/// `specify_chip_select()`.
#[derive(Copy, Clone)]
pub struct CS(pub u32);
548
/// The peripheral addresses chip selects by numeric ID rather than a GPIO
/// pin, so a `CS` converts to itself. Declared for the active-low marker
/// type to satisfy the `IntoChipSelect` interface.
impl hil::spi::cs::IntoChipSelect<CS, hil::spi::cs::ActiveLow> for CS {
    fn into_cs(self) -> CS {
        self
    }
}
554
impl<'a> hil::spi::SpiMaster<'a> for SpiHost<'a> {
    type ChipSelect = CS;

    /// One-time hardware setup: enable the TXEMPTY event and error
    /// reporting, unmask interrupts, and switch the host block on.
    fn init(&self) -> Result<(), ErrorCode> {
        let regs = self.registers;
        self.event_enable();
        self.err_enable();

        self.enable_interrupts();

        self.enable_spi_host();

        // NOTE(review): writes one dummy word and then asserts the TX FIFO
        // is empty -- presumably a sanity check that the FIFO was consumed
        // after enabling the host; confirm against the IP documentation.
        regs.tx_data.write(tx_data::DATA.val(0x00));
        assert_eq!(regs.status.read(status::TXQD), 0);
        Ok(())
    }

    /// Register the client to notify when `read_write_bytes()` completes.
    fn set_client(&self, client: &'a dyn hil::spi::SpiMasterClient) {
        self.client.set(client);
    }

    /// True while a transfer is in flight.
    fn is_busy(&self) -> bool {
        self.busy.get()
    }

    /// Start a full-duplex transfer: `tx_buf` is clocked out and one byte
    /// is captured into `rx_buf` for each byte sent.
    ///
    /// `rx_buf` is required (`Err(NOMEM)` otherwise). Completion is
    /// signalled asynchronously via the client's `read_write_done()`.
    fn read_write_bytes(
        &self,
        tx_buf: SubSliceMut<'static, u8>,
        rx_buf: Option<SubSliceMut<'static, u8>>,
    ) -> Result<
        (),
        (
            ErrorCode,
            SubSliceMut<'static, u8>,
            Option<SubSliceMut<'static, u8>>,
        ),
    > {
        debug_assert!(!self.busy.get());
        debug_assert!(self.tx_buf.is_none());
        debug_assert!(self.rx_buf.is_none());
        let regs = self.registers;

        // Reject overlapping transfers.
        if self.is_busy() || regs.status.is_set(status::TXFULL) {
            return Err((ErrorCode::BUSY, tx_buf, rx_buf));
        }

        // This driver only supports full-duplex operation: an RX buffer is
        // mandatory.
        if rx_buf.is_none() {
            return Err((ErrorCode::NOMEM, tx_buf, rx_buf));
        }

        self.tx_len.set(tx_buf.len());

        let mut t_byte: u32;
        let mut tx_slice: [u8; 4];
        self.set_spi_busy();

        // Pack bytes little-endian into 32-bit FIFO words until the FIFO
        // fills (TXQD counts words; 64-word depth assumed here -- TODO
        // confirm against the IP configuration) or the buffer is exhausted.
        // Remaining bytes are queued later by spi_transfer_progress().
        while !regs.status.is_set(status::TXFULL) && regs.status.read(status::TXQD) < 64 {
            tx_slice = [0, 0, 0, 0];
            for elem in tx_slice.iter_mut() {
                if self.tx_offset.get() >= self.tx_len.get() {
                    break;
                }
                *elem = tx_buf[self.tx_offset.get()];
                self.tx_offset.set(self.tx_offset.get() + 1);
            }
            t_byte = u32::from_le_bytes(tx_slice);
            regs.tx_data.write(tx_data::DATA.val(t_byte));

            if self.tx_offset.get() >= self.tx_len.get() {
                break;
            }
        }

        // Hold both buffers until the transfer completes or fails.
        self.tx_buf.replace(tx_buf);

        // Capture at most as many bytes as are transmitted and as fit in
        // the RX buffer.
        rx_buf.map(|rx_buf_t| {
            self.rx_len.set(cmp::min(self.tx_len.get(), rx_buf_t.len()));
            self.rx_buf.replace(rx_buf_t);
        });

        self.start_transceive();

        Ok(())
    }

    /// Single-byte writes are not supported by this driver.
    fn write_byte(&self, _val: u8) -> Result<(), ErrorCode> {
        Err(ErrorCode::FAIL)
    }

    /// Single-byte reads are not supported by this driver.
    fn read_byte(&self) -> Result<u8, ErrorCode> {
        Err(ErrorCode::FAIL)
    }

    /// Single-byte exchanges are not supported by this driver.
    fn read_write_byte(&self, _val: u8) -> Result<u8, ErrorCode> {
        Err(ErrorCode::FAIL)
    }

    /// Select which chip-select ID subsequent commands target.
    fn specify_chip_select(&self, cs: Self::ChipSelect) -> Result<(), ErrorCode> {
        let regs = self.registers;

        regs.csid.write(csid_ctrl::CSID.val(cs.0));

        Ok(())
    }

    /// Program the SCK divider for the requested rate (Hz) and remember
    /// the accepted rate for `get_rate()`.
    fn set_rate(&self, rate: u32) -> Result<u32, ErrorCode> {
        let regs = self.registers;

        match self.calculate_tsck_scaler(rate) {
            Ok(scaler) => {
                regs.config_opts
                    .modify(conf_opts::CLKDIV_0.val(scaler as u32));
                self.tsclk.set(rate);
                Ok(rate)
            }
            Err(e) => Err(e),
        }
    }

    /// Last rate accepted by `set_rate()` (0 if never set).
    fn get_rate(&self) -> u32 {
        self.tsclk.get()
    }

    /// Set clock polarity (CPOL) for chip-select slot 0.
    fn set_polarity(&self, polarity: ClockPolarity) -> Result<(), ErrorCode> {
        let regs = self.registers;
        match polarity {
            ClockPolarity::IdleLow => regs.config_opts.modify(conf_opts::CPOL_0::CLEAR),
            ClockPolarity::IdleHigh => regs.config_opts.modify(conf_opts::CPOL_0::SET),
        }
        Ok(())
    }

    /// Read back the configured clock polarity (CPOL).
    fn get_polarity(&self) -> ClockPolarity {
        let regs = self.registers;

        match regs.config_opts.read(conf_opts::CPOL_0) {
            0 => ClockPolarity::IdleLow,
            1 => ClockPolarity::IdleHigh,
            // Single-bit field: only 0 or 1 are possible.
            _ => unreachable!(),
        }
    }

    /// Set clock phase (CPHA) for chip-select slot 0.
    fn set_phase(&self, phase: ClockPhase) -> Result<(), ErrorCode> {
        let regs = self.registers;
        match phase {
            ClockPhase::SampleLeading => regs.config_opts.modify(conf_opts::CPHA_0::CLEAR),
            ClockPhase::SampleTrailing => regs.config_opts.modify(conf_opts::CPHA_0::SET),
        }
        Ok(())
    }

    /// Read back the configured clock phase (CPHA).
    fn get_phase(&self) -> ClockPhase {
        let regs = self.registers;

        match regs.config_opts.read(conf_opts::CPHA_0) {
            1 => ClockPhase::SampleTrailing,
            0 => ClockPhase::SampleLeading,
            // Single-bit field: only 0 or 1 are possible.
            _ => unreachable!(),
        }
    }

    /// Manual chip-select control is not supported; CSAAT in the COMMAND
    /// register manages CS across segments instead.
    fn hold_low(&self) {
        unimplemented!("spi_host: does not support hold low");
    }

    /// Manual chip-select control is not supported; see `hold_low()`.
    fn release_low(&self) {
        unimplemented!("spi_host: does not support release low");
    }
}