// capsules_core/spi_controller.rs
1// Licensed under the Apache License, Version 2.0 or the MIT License.
2// SPDX-License-Identifier: Apache-2.0 OR MIT
3// Copyright Tock Contributors 2022.
4
5//! Provides userspace applications with the ability to communicate over the SPI
6//! bus.
7
8use core::cell::Cell;
9use core::cmp;
10
11use kernel::grant::{AllowRoCount, AllowRwCount, Grant, GrantKernelData, UpcallCount};
12use kernel::hil::spi::ClockPhase;
13use kernel::hil::spi::ClockPolarity;
14use kernel::hil::spi::{SpiMasterClient, SpiMasterDevice};
15use kernel::processbuffer::{ReadableProcessBuffer, WriteableProcessBuffer};
16use kernel::syscall::{CommandReturn, SyscallDriver};
17use kernel::utilities::cells::{MapCell, OptionalCell};
18use kernel::utilities::leasable_buffer::SubSliceMut;
19use kernel::{ErrorCode, ProcessId};
20
21/// Syscall driver number.
22use crate::driver;
23pub const DRIVER_NUM: usize = driver::NUM::Spi as usize;
24
/// Ids for read-only allow buffers
mod ro_allow {
    /// Buffer of bytes the application wants written out on the bus (MOSI).
    pub const WRITE: usize = 0;
    /// The number of allow buffers the kernel stores for this grant
    pub const COUNT: u8 = 1;
}
31
/// Ids for read-write allow buffers
mod rw_allow {
    /// Buffer that bytes read from the bus (MISO) are copied back into.
    pub const READ: usize = 0;
    /// The number of allow buffers the kernel stores for this grant
    pub const COUNT: u8 = 1;
}
38
39/// Suggested length for the Spi read and write buffer
40pub const DEFAULT_READ_BUF_LENGTH: usize = 1024;
41pub const DEFAULT_WRITE_BUF_LENGTH: usize = 1024;
42
// SPI operations are handled by copying into a kernel buffer for
// writes and copying out of a kernel buffer for reads.
//
// If the application buffer is larger than the kernel buffer,
// the driver issues multiple HAL operations. The len field
// of an application keeps track of the length of the desired
// operation, while the index variable keeps track of the
// index an ongoing operation is at in the buffers.

/// Per-process state kept in the grant region.
#[derive(Default)]
pub struct App {
    /// Total number of bytes the app requested for the current operation.
    len: usize,
    /// How far (in bytes) into the app buffers the ongoing operation has progressed.
    index: usize,
}
57
/// Userspace SPI driver state, wrapping a single [`SpiMasterDevice`].
pub struct Spi<'a, S: SpiMasterDevice<'a>> {
    /// The underlying SPI device operations are issued on.
    spi_master: &'a S,
    /// True while a HAL transfer is outstanding; commands 2/11/12 are
    /// rejected with BUSY while set.
    busy: Cell<bool>,
    /// Static kernel buffer that bytes read from the bus are captured into.
    kernel_read: MapCell<SubSliceMut<'static, u8>>,
    /// Static kernel buffer that outgoing bytes are staged in.
    kernel_write: MapCell<SubSliceMut<'static, u8>>,
    /// Maximum bytes per HAL operation: the min of the two kernel buffer
    /// lengths (see `config_buffers`).
    kernel_len: Cell<usize>,
    /// Per-process grant: one upcall slot plus the allow slots above.
    grants: Grant<
        App,
        UpcallCount<1>,
        AllowRoCount<{ ro_allow::COUNT }>,
        AllowRwCount<{ rw_allow::COUNT }>,
    >,
    /// The process currently holding exclusive access to this driver.
    current_process: OptionalCell<ProcessId>,
    /// Which userspace command started the operation in progress.
    command: Cell<UserCommand>,
}
73
/// Records which userspace command started the current operation; this
/// selects how `do_next_read_write` fills the kernel write buffer when no
/// write buffer is shared.
#[derive(Debug, Clone, Copy)]
enum UserCommand {
    /// Command 11: drive 0xFF on the bus and return only the bytes read.
    ReadBytes,
    /// Command 12: transmit the contents of the read-write buffer and
    /// overwrite it in place with the bytes read back.
    InplaceReadWriteBytes,
}
79
80impl<'a, S: SpiMasterDevice<'a>> Spi<'a, S> {
81 pub fn new(
82 spi_master: &'a S,
83 grants: Grant<
84 App,
85 UpcallCount<1>,
86 AllowRoCount<{ ro_allow::COUNT }>,
87 AllowRwCount<{ rw_allow::COUNT }>,
88 >,
89 ) -> Spi<'a, S> {
90 Spi {
91 spi_master,
92 busy: Cell::new(false),
93 kernel_len: Cell::new(0),
94 kernel_read: MapCell::empty(),
95 kernel_write: MapCell::empty(),
96 grants,
97 current_process: OptionalCell::empty(),
98 command: Cell::new(UserCommand::ReadBytes),
99 }
100 }
101
102 pub fn config_buffers(&self, read: &'static mut [u8], write: &'static mut [u8]) {
103 let len = cmp::min(read.len(), write.len());
104 self.kernel_len.set(len);
105 self.kernel_read.replace(read.into());
106 self.kernel_write.replace(write.into());
107 }
108
109 // Assumes checks for busy/etc. already done
110 // Updates app.index to be index + length of op
111 fn do_next_read_write(&self, app: &mut App, kernel_data: &GrantKernelData) {
112 let write_len = self.kernel_write.map_or(0, |kwbuf| {
113 let mut start = app.index;
114 let tmp_len = kernel_data
115 .get_readonly_processbuffer(ro_allow::WRITE)
116 .and_then(|write| {
117 write.enter(|src| {
118 let len = cmp::min(app.len - start, self.kernel_len.get());
119 let end = cmp::min(start + len, src.len());
120 start = cmp::min(start, end);
121
122 for (i, c) in src[start..end].iter().enumerate() {
123 kwbuf[i] = c.get();
124 }
125 end - start
126 })
127 })
128 .unwrap_or(0);
129 app.index = start + tmp_len;
130 tmp_len
131 });
132
133 let rlen = kernel_data
134 .get_readwrite_processbuffer(rw_allow::READ)
135 .map_or(0, |read| read.len());
136
137 // TODO verify SPI return value
138 let _ = if rlen == 0 {
139 let mut kwbuf = self
140 .kernel_write
141 .take()
142 .unwrap_or((&mut [] as &'static mut [u8]).into());
143 kwbuf.slice(0..write_len);
144 self.spi_master.read_write_bytes(kwbuf, None)
145 } else if write_len == 0 {
146 let read_len = self
147 .kernel_write
148 .map_or(0, |kwbuf| match self.command.get() {
149 UserCommand::ReadBytes => {
150 kwbuf[..].fill(0xFF);
151
152 cmp::min(kwbuf.len(), rlen)
153 }
154 UserCommand::InplaceReadWriteBytes => kernel_data
155 .get_readwrite_processbuffer(rw_allow::READ)
156 .and_then(|read| {
157 read.mut_enter(|src| {
158 let length = cmp::min(kwbuf.len(), rlen);
159
160 let start = app.index;
161 let end = cmp::min(app.index + length, src.len());
162
163 for (i, c) in src[start..end].iter().enumerate() {
164 kwbuf[i] = c.get();
165 }
166
167 length
168 })
169 })
170 .unwrap_or(0),
171 });
172 app.index += read_len;
173 let kwbuf = self
174 .kernel_write
175 .take()
176 .unwrap_or((&mut [] as &'static mut [u8]).into());
177 if let Some(mut krbuf) = self.kernel_read.take() {
178 krbuf.slice(0..read_len);
179 self.spi_master.read_write_bytes(kwbuf, Some(krbuf))
180 } else {
181 self.spi_master.read_write_bytes(kwbuf, None)
182 }
183 } else {
184 let mut kwbuf = self
185 .kernel_write
186 .take()
187 .unwrap_or((&mut [] as &'static mut [u8]).into());
188 kwbuf.slice(0..write_len);
189 if let Some(mut krbuf) = self.kernel_read.take() {
190 krbuf.slice(0..rlen);
191 self.spi_master.read_write_bytes(kwbuf, Some(krbuf))
192 } else {
193 self.spi_master.read_write_bytes(kwbuf, None)
194 }
195 };
196 }
197}
198
impl<'a, S: SpiMasterDevice<'a>> SyscallDriver for Spi<'a, S> {
    // Command interface:
    //
    // 0: driver existence check
    // 2: read/write buffers
    //   - requires write buffer registered with allow
    //   - read buffer optional
    // 3: set chip select
    //   - selects which peripheral (CS line) the SPI should
    //     activate
    //   - valid values are 0-3 for SAM4L
    //   - invalid value will result in CS 0
    // 4: get chip select
    //   - returns current selected peripheral
    // 5: set rate on current peripheral
    //   - parameter in bps
    // 6: get rate on current peripheral
    //   - value in bps
    // 7: set clock phase on current peripheral
    //   - 0 is sample leading
    //   - non-zero is sample trailing
    // 8: get clock phase on current peripheral
    //   - 0 is sample leading
    //   - non-zero is sample trailing
    // 9: set clock polarity on current peripheral
    //   - 0 is idle low
    //   - non-zero is idle high
    // 10: get clock polarity on current peripheral
    //   - 0 is idle low
    //   - non-zero is idle high
    // 11: read buffers
    //   - read buffer required
    // 12: inplace read/write buffers
    //   - requires read buffer registered with allow
    //   - write buffer not supported
    //
    // x: lock spi
    //   - if you perform an operation without the lock,
    //     it implicitly acquires the lock before the
    //     operation and releases it after
    //   - while an app holds the lock no other app can issue
    //     operations on SPI (they are buffered)
    // x+1: unlock spi
    //   - does nothing if lock not held
    //
    fn command(
        &self,
        command_num: usize,
        arg1: usize,
        _: usize,
        process_id: ProcessId,
    ) -> CommandReturn {
        if command_num == 0 {
            // Handle unconditional driver existence check.
            return CommandReturn::success();
        }

        // Check if this driver is free, or already dedicated to this process.
        // A process keeps exclusive access until its grant can no longer be
        // entered (e.g. the process has exited), at which point the driver
        // is handed to the next caller.
        let match_or_empty_or_nonexistant = self.current_process.map_or(true, |current_process| {
            self.grants
                .enter(current_process, |_, _| current_process == process_id)
                .unwrap_or(true)
        });
        if match_or_empty_or_nonexistant {
            self.current_process.set(process_id);
        } else {
            return CommandReturn::failure(ErrorCode::NOMEM);
        }

        match command_num {
            // No longer supported, wrap inside a read_write_bytes
            1 => {
                // read_write_byte
                CommandReturn::failure(ErrorCode::NOSUPPORT)
            }
            2 => {
                // read_write_bytes: transfer arg1 bytes out of the write
                // buffer, optionally capturing the reply in the read buffer.
                if self.busy.get() {
                    return CommandReturn::failure(ErrorCode::BUSY);
                }
                self.grants
                    .enter(process_id, |app, kernel_data| {
                        // When we do a read/write, the read part is optional.
                        // So there are three cases:
                        // 1) Write and read buffers present: len is min of lengths
                        // 2) Only write buffer present: len is len of write
                        // 3) No write buffer present: no operation
                        let wlen = kernel_data
                            .get_readonly_processbuffer(ro_allow::WRITE)
                            .map_or(0, |write| write.len());
                        let rlen = kernel_data
                            .get_readwrite_processbuffer(rw_allow::READ)
                            .map_or(0, |read| read.len());
                        // Note that non-shared and 0-sized read buffers both report 0 as size
                        let len = if rlen == 0 { wlen } else { wlen.min(rlen) };

                        if len >= arg1 && arg1 > 0 {
                            app.len = arg1;
                            app.index = 0;
                            self.busy.set(true);
                            self.do_next_read_write(app, kernel_data);
                            CommandReturn::success()
                        } else {
                            /* write buffer too small, or zero length write */
                            CommandReturn::failure(ErrorCode::INVAL)
                        }
                    })
                    .unwrap_or(CommandReturn::failure(ErrorCode::FAIL))
            }
            3 => {
                // set chip select
                // XXX: TODO: do nothing, for now, until we fix interface
                // so virtual instances can use multiple chip selects
                CommandReturn::failure(ErrorCode::NOSUPPORT)
            }
            4 => {
                // get chip select *
                // XXX: We don't really know what chip select is being used
                // since we can't set it. Return error until set chip select
                // works.
                CommandReturn::failure(ErrorCode::NOSUPPORT)
            }
            5 => {
                // set baud rate
                match self.spi_master.set_rate(arg1 as u32) {
                    Ok(()) => CommandReturn::success(),
                    Err(error) => CommandReturn::failure(error),
                }
            }
            6 => {
                // get baud rate
                CommandReturn::success_u32(self.spi_master.get_rate())
            }
            7 => {
                // set phase: 0 = sample leading, non-zero = sample trailing
                match match arg1 {
                    0 => self.spi_master.set_phase(ClockPhase::SampleLeading),
                    _ => self.spi_master.set_phase(ClockPhase::SampleTrailing),
                } {
                    Ok(()) => CommandReturn::success(),
                    Err(error) => CommandReturn::failure(error),
                }
            }
            8 => {
                // get phase
                CommandReturn::success_u32(self.spi_master.get_phase() as u32)
            }
            9 => {
                // set polarity: 0 = idle low, non-zero = idle high
                match match arg1 {
                    0 => self.spi_master.set_polarity(ClockPolarity::IdleLow),
                    _ => self.spi_master.set_polarity(ClockPolarity::IdleHigh),
                } {
                    Ok(()) => CommandReturn::success(),
                    Err(error) => CommandReturn::failure(error),
                }
            }
            10 => {
                // get polarity
                CommandReturn::success_u32(self.spi_master.get_polarity() as u32)
            }
            11 => {
                // read_bytes
                // write 0xFF to the SPI bus and return the read values to
                // userspace
                if self.busy.get() {
                    return CommandReturn::failure(ErrorCode::BUSY);
                }
                self.grants
                    .enter(process_id, |app, kernel_data| {
                        // When we do a read, we just write 0xFF on the bus.
                        let rlen = kernel_data
                            .get_readwrite_processbuffer(rw_allow::READ)
                            .map_or(0, |read| read.len());

                        if rlen >= arg1 && rlen > 0 {
                            app.len = arg1;
                            app.index = 0;
                            self.busy.set(true);
                            self.command.set(UserCommand::ReadBytes);
                            self.do_next_read_write(app, kernel_data);
                            CommandReturn::success()
                        } else {
                            /* read buffer too small, or zero length read */
                            CommandReturn::failure(ErrorCode::INVAL)
                        }
                    })
                    .unwrap_or(CommandReturn::failure(ErrorCode::FAIL))
            }
            12 => {
                // inplace read_write_bytes: transmit the read-write buffer's
                // contents and overwrite it with the bytes read back
                if self.busy.get() {
                    return CommandReturn::failure(ErrorCode::BUSY);
                }
                self.grants
                    .enter(process_id, |app, kernel_data| {
                        let rlen = kernel_data
                            .get_readwrite_processbuffer(rw_allow::READ)
                            .map_or(0, |read| read.len());

                        if rlen >= arg1 && arg1 > 0 {
                            app.len = arg1;
                            app.index = 0;
                            self.busy.set(true);
                            self.command.set(UserCommand::InplaceReadWriteBytes);
                            self.do_next_read_write(app, kernel_data);
                            CommandReturn::success()
                        } else {
                            /* read buffer too small, or zero length operation */
                            CommandReturn::failure(ErrorCode::INVAL)
                        }
                    })
                    .unwrap_or(CommandReturn::failure(ErrorCode::FAIL))
            }
            _ => CommandReturn::failure(ErrorCode::NOSUPPORT),
        }
    }

    fn allocate_grant(&self, processid: ProcessId) -> Result<(), kernel::process::Error> {
        self.grants.enter(processid, |_, _| {})
    }
}
419
impl<'a, S: SpiMasterDevice<'a>> SpiMasterClient for Spi<'a, S> {
    /// Completion callback from the SPI HAL for one chunk: copy any bytes
    /// read back into the app's read buffer, reclaim the kernel buffers,
    /// then either issue the next chunk or schedule the completion upcall.
    fn read_write_done(
        &self,
        mut writebuf: SubSliceMut<'static, u8>,
        readbuf: Option<SubSliceMut<'static, u8>>,
        status: Result<usize, ErrorCode>,
    ) {
        self.current_process.map(|process_id| {
            let _ = self.grants.enter(process_id, move |app, kernel_data| {
                // Copy the chunk just read into the app's read buffer (if
                // shared). `status` carries the number of bytes transferred.
                let rbuf = readbuf.inspect(|src| {
                    let index = app.index;
                    let _ = kernel_data
                        .get_readwrite_processbuffer(rw_allow::READ)
                        .and_then(|read| {
                            read.mut_enter(|dest| {
                                // Need to be careful that app_read hasn't changed
                                // under us, so check all values against actual
                                // slice lengths.
                                //
                                // If app_read is shorter than before, and shorter
                                // than what we have read would require, then truncate.
                                // -pal 12/9/20
                                let end = index;
                                let start = index - status.unwrap_or(0);
                                let end = cmp::min(end, dest.len());

                                // If the new endpoint is earlier than our expected
                                // startpoint, we set the startpoint to be the same;
                                // This results in a zero-length operation. -pal 12/9/20
                                let start = cmp::min(start, end);

                                // The amount to copy can't be longer than the size of the
                                // read buffer. -pal 6/8/21
                                let real_len = cmp::min(end - start, src.len());
                                let dest_area = &dest[start..end];
                                for (i, c) in src[0..real_len].iter().enumerate() {
                                    dest_area[i].set(*c);
                                }
                            })
                        });
                });

                // Un-slice the kernel buffers and return them for reuse by
                // the next chunk.
                if let Some(mut rb) = rbuf {
                    rb.reset();
                    self.kernel_read.put(rb);
                }

                writebuf.reset();
                self.kernel_write.replace(writebuf);

                if app.index == app.len {
                    // Operation complete: clear per-app state and notify
                    // userspace with the total requested length.
                    self.busy.set(false);
                    let len = app.len;
                    app.len = 0;
                    app.index = 0;
                    kernel_data.schedule_upcall(0, (len, 0, 0)).ok();
                } else {
                    // More bytes remain; issue the next chunk.
                    self.do_next_read_write(app, kernel_data);
                }
            });
        });
    }
}