/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <assert.h>
#include <lib/spi/client/spi.h>
#include <lib/tipc/tipc.h>
#include <lk/compiler.h>
#include <lk/macros.h>
#include <stdbool.h>
#include <stdlib.h>
#include <string.h>
#include <sys/auxv.h>
#include <trusty/memref.h>
#include <uapi/err.h>
#include <uapi/mm.h>

#define TLOG_TAG "spi-client"
#include <trusty_log.h>

#define PAGE_SIZE getauxval(AT_PAGESZ)

/**
 * Size of the largest SPI request argument structure. Needs to be updated if we
 * add larger SPI arguments.
 */
#define SPI_CMD_SHM_ARGS_MAX_SIZE sizeof(struct spi_xfer_args)

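/*
 * Hypothetical compile-time guard (sketch only, not part of the original
 * build; assumes C11 _Static_assert and that all argument structs are
 * visible at this point):
 *
 *   _Static_assert(sizeof(struct spi_clk_args) <= SPI_CMD_SHM_ARGS_MAX_SIZE,
 *                  "SPI_CMD_SHM_ARGS_MAX_SIZE too small");
 *   _Static_assert(sizeof(struct spi_delay_args) <= SPI_CMD_SHM_ARGS_MAX_SIZE,
 *                  "SPI_CMD_SHM_ARGS_MAX_SIZE too small");
 */
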
static int send_shm(struct spi_dev* dev,
                    struct spi_msg_req* req,
                    struct spi_shm_map_req* shm_req,
                    handle_t memref) {
    int rc;
    struct iovec iovs[2] = {
            {
                    .iov_base = req,
                    .iov_len = sizeof(*req),
            },
            {
                    .iov_base = shm_req,
                    .iov_len = sizeof(*shm_req),
            },
    };
    struct ipc_msg msg = {
            .iov = iovs,
            .num_iov = countof(iovs),
            .handles = &memref,
            .num_handles = 1,
    };
    rc = send_msg(dev->h, &msg);
    if (rc < 0) {
        TLOGE("failed (%d) to send memref\n", rc);
        return rc;
    }
    return NO_ERROR;
}

static int handle_shm_resp(handle_t chan) {
    int rc;
    struct uevent evt;
    struct spi_msg_resp resp;

    rc = wait(chan, &evt, INFINITE_TIME);
    if (rc != NO_ERROR) {
        TLOGE("failed (%d) to wait for reply\n", rc);
        return rc;
    }

    rc = tipc_recv1(chan, sizeof(resp), &resp, sizeof(resp));
    if (rc < 0 || (size_t)rc != sizeof(resp)) {
        TLOGE("failed (%d) to read reply\n", rc);
        if (rc >= 0) {
            rc = ERR_BAD_LEN;
        }
        return rc;
    }

    return translate_srv_err(resp.status);
}

static int shm_map(struct spi_dev* dev, void* shm_base, size_t shm_size) {
    int rc;
    struct spi_msg_req req;
    struct spi_shm_map_req shm_req;

    /* create memref to send to SPI server */
    rc = memref_create(shm_base, shm_size,
                       MMAP_FLAG_PROT_READ | MMAP_FLAG_PROT_WRITE);
    if (rc < 0) {
        TLOGE("failed (%d) to create memref\n", rc);
        goto err_memref_create;
    }
    handle_t memref = (handle_t)rc;

    /* send memref to SPI server */
    req.cmd = SPI_CMD_MSG_OP_SHM_MAP;
    shm_req.len = shm_size;
    rc = send_shm(dev, &req, &shm_req, memref);
    if (rc < 0) {
        TLOGE("failed (%d) to send memref\n", rc);
        goto err_send_msg;
    }

    /* handle SPI server's response */
    rc = handle_shm_resp(dev->h);
    if (rc != NO_ERROR) {
        TLOGE("failed (%d) to handle shared memory map response\n", rc);
        goto err_resp;
    }

    close(memref);
    return NO_ERROR;

err_resp:
err_send_msg:
    close(memref);
err_memref_create:
    return rc;
}

static inline size_t get_shm_size(size_t max_num_cmds,
                                  size_t max_total_payload) {
    /* account for space taken up by alignment requirements */
    size_t max_total_align = max_num_cmds * (SPI_CMD_SHM_ALIGN - 1);
    size_t cmd_size = round_up(sizeof(struct spi_shm_hdr), SPI_CMD_SHM_ALIGN) +
                      round_up(SPI_CMD_SHM_ARGS_MAX_SIZE, SPI_CMD_SHM_ALIGN);
    size_t shm_size =
            max_num_cmds * cmd_size + max_total_payload + max_total_align;

    return round_up(shm_size, PAGE_SIZE);
}

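/*
 * Worked example of the sizing above (illustrative only; assumes
 * SPI_CMD_SHM_ALIGN == 8, an 8-byte spi_shm_hdr, an 8-byte spi_xfer_args,
 * and a 4096-byte page, none of which are guaranteed by this file):
 *
 *   max_num_cmds = 4, max_total_payload = 1024
 *   cmd_size        = round_up(8, 8) + round_up(8, 8)  = 16
 *   max_total_align = 4 * (8 - 1)                      = 28
 *   shm_size        = 4 * 16 + 1024 + 28               = 1116
 *   round_up(1116, 4096)                               = 4096, i.e. one page
 */
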
int spi_dev_open(struct spi_dev* dev,
                 const char* name,
                 size_t max_num_cmds,
                 size_t max_total_payload) {
    int rc;
    void* shm_base;
    size_t shm_size;

    if (!dev || !name || max_num_cmds == 0) {
        return ERR_INVALID_ARGS;
    }

    /* connect to SPI service */
    rc = tipc_connect(&dev->h, name);
    if (rc != NO_ERROR) {
        TLOGE("failed (%d) to connect to service \"%s\"\n", rc, name);
        goto err_connect;
    }

    /* allocate shared memory */
    shm_size = get_shm_size(max_num_cmds, max_total_payload);
    shm_base = memalign(PAGE_SIZE, shm_size);
    if (!shm_base) {
        TLOGE("failed to allocate shared memory, base: %p, size: %zu\n",
              shm_base, shm_size);
        rc = ERR_NO_MEMORY;
        goto err_shm_alloc;
    }

    /* establish shared memory with SPI server */
    rc = shm_map(dev, shm_base, shm_size);
    if (rc != NO_ERROR) {
        TLOGE("failed (%d) to send shared memory\n", rc);
        goto err_shm_send;
    }

    mb_init(&dev->shm, shm_base, shm_size, SPI_CMD_SHM_ALIGN);
    mb_resize(&dev->shm, shm_size);
    dev->max_num_cmds = max_num_cmds;
    dev->max_total_payload = max_total_payload;
    spi_clear_cmds(dev);
    return NO_ERROR;

err_shm_send:
    /*
     * There is no way to free() shared memory safely once the SPI server
     * receives the memref. At this point, we don't know whether shm_map()
     * successfully shared the memory or not, so we leak it in case it was
     * already shared.
     * TODO: It may be possible to avoid this leak using other ways of
     * allocating shared memory.
     */
err_shm_alloc:
    close(dev->h);
    dev->h = INVALID_IPC_HANDLE;
err_connect:
    return rc;
}

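/*
 * Minimal usage sketch for spi_dev_open() (illustrative only; the port name
 * "com.android.trusty.spi.dev0" and the sizing values are hypothetical
 * placeholders, not names defined by this library):
 *
 *   struct spi_dev dev;
 *
 *   // max_num_cmds = 4, max_total_payload = 1024
 *   int rc = spi_dev_open(&dev, "com.android.trusty.spi.dev0", 4, 1024);
 *   if (rc != NO_ERROR) {
 *       TLOGE("failed (%d) to open SPI device\n", rc);
 *   }
 */
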
static inline bool is_initialized(struct spi_dev* dev) {
    return dev && dev->h != INVALID_IPC_HANDLE;
}

void spi_clear_cmds(struct spi_dev* dev) {
    assert(is_initialized(dev));
    mb_rewind_pos(&dev->shm);
    dev->num_cmds = 0;
    dev->total_payload = 0;
    dev->config_err = false;
}

static int send_batch_req(struct spi_dev* dev) {
    struct spi_msg_req req = {
            .cmd = SPI_CMD_MSG_OP_BATCH_EXEC,
    };
    struct spi_batch_req batch_req = {
            .len = mb_curr_pos(&dev->shm),
            .num_cmds = dev->num_cmds,
    };
    int rc = tipc_send2(dev->h, &req, sizeof(req), &batch_req,
                        sizeof(batch_req));
    if (rc < 0 || (size_t)rc != sizeof(req) + sizeof(batch_req)) {
        TLOGE("failed (%d) to send SPI batch request\n", rc);
        if (rc >= 0) {
            rc = ERR_BAD_LEN;
        }
        return rc;
    }
    return NO_ERROR;
}

static int validate_batch_resp(struct spi_batch_resp* batch_resp,
                               struct mem_buf* shm,
                               size_t* failed) {
    int rc = NO_ERROR;
    struct spi_shm_hdr* shm_hdr;
    uint32_t shm_hdr_cmd;
    uint32_t shm_hdr_status;
    struct spi_xfer_args* xfer_resp;
    uint32_t xfer_resp_len;

    /*
     * The length of the response in shared memory must be equal to that of
     * the request.
     */
    if (batch_resp->len != mb_curr_pos(shm)) {
        return ERR_BAD_STATE;
    }

    mb_rewind_pos(shm);

    while (mb_curr_pos(shm) < batch_resp->len) {
        shm_hdr = mb_advance_pos(shm, sizeof(*shm_hdr));
        shm_hdr_cmd = READ_ONCE(shm_hdr->cmd);
        shm_hdr_status = READ_ONCE(shm_hdr->status);

        if (!(shm_hdr_cmd & SPI_CMD_RESP_BIT)) {
            TLOGE("invalid response 0x%08x\n", shm_hdr_cmd);
            return ERR_BAD_STATE;
        }
        rc = translate_srv_err(shm_hdr_status);
        if (rc != NO_ERROR) {
            return rc;
        }

        switch (shm_hdr_cmd & SPI_CMD_OP_MASK) {
        case SPI_CMD_SHM_OP_XFER:
            /* skip xfer_resp and payload */
            xfer_resp = mb_advance_pos(shm, sizeof(*xfer_resp));
            xfer_resp_len = READ_ONCE(xfer_resp->len);
            mb_advance_pos(shm, xfer_resp_len);
            break;
        case SPI_CMD_SHM_OP_CS_ASSERT:
        case SPI_CMD_SHM_OP_CS_DEASSERT:
            break;
        case SPI_CMD_SHM_OP_SET_CLK:
            /* skip spi_clk_args */
            mb_advance_pos(shm, sizeof(struct spi_clk_args));
            break;
        case SPI_CMD_SHM_OP_DELAY:
            /* skip spi_delay_args */
            mb_advance_pos(shm, sizeof(struct spi_delay_args));
            break;
        default:
            TLOGE("cmd 0x%x: unknown command\n", shm_hdr_cmd);
            return ERR_CMD_UNKNOWN;
        }
        /*
         * Count commands validated so far; if one of the early returns above
         * fires, *failed is left at the index of the offending command.
         */
        (*failed)++;
    }

    return NO_ERROR;
}

static int handle_batch_resp(struct spi_dev* dev, size_t* failed) {
    int rc;
    struct uevent evt;
    struct spi_msg_resp resp;
    struct spi_batch_resp batch_resp;

    rc = wait(dev->h, &evt, INFINITE_TIME);
    if (rc != NO_ERROR) {
        TLOGE("failed (%d) to wait for batch response\n", rc);
        return rc;
    }

    rc = tipc_recv2(dev->h, sizeof(resp) + sizeof(batch_resp), &resp,
                    sizeof(resp), &batch_resp, sizeof(batch_resp));
    if (rc < 0 || (size_t)rc != sizeof(resp) + sizeof(batch_resp)) {
        TLOGE("failed (%d) to receive batch response\n", rc);
        if (rc >= 0) {
            rc = ERR_BAD_LEN;
        }
        return rc;
    }

    rc = translate_srv_err(resp.status);
    if (rc != NO_ERROR) {
        TLOGE("batch request encountered an error\n");
        *failed = batch_resp.failed;
        return rc;
    }

    return validate_batch_resp(&batch_resp, &dev->shm, failed);
}

int spi_exec_cmds(struct spi_dev* dev, size_t* failed) {
    int rc;
    size_t fake_failed;

    if (!is_initialized(dev)) {
        return ERR_INVALID_ARGS;
    }

    if (!failed) {
        failed = &fake_failed;
    }
    *failed = 0;

    if (dev->config_err) {
        rc = ERR_BAD_STATE;
        *failed = dev->num_cmds;
        goto out;
    }

    rc = send_batch_req(dev);
    if (rc != NO_ERROR) {
        goto out;
    }

    rc = handle_batch_resp(dev, failed);

out:
    /* reset SPI requests */
    spi_clear_cmds(dev);
    return rc;
}

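/*
 * Typical batch flow (illustrative sketch; assumes a dev already opened with
 * spi_dev_open() and elides the data transfer setup):
 *
 *   size_t failed;
 *   int rc;
 *
 *   rc = spi_add_cs_assert_cmd(dev);
 *   // ...queue more commands here, e.g. spi_add_data_xfer_cmd()...
 *   rc = spi_add_cs_deassert_cmd(dev);
 *
 *   // All queued commands execute in one round trip to the SPI server;
 *   // on error, examine "failed" to see which command the failure is
 *   // attributed to.
 *   rc = spi_exec_cmds(dev, &failed);
 *   if (rc != NO_ERROR) {
 *       TLOGE("batch failed (%d), failed command: %zu\n", rc, failed);
 *   }
 *
 * Note that spi_exec_cmds() clears the queued commands whether it succeeds
 * or fails.
 */
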
static int spi_add_cmd(struct spi_dev* dev,
                       uint32_t cmd,
                       void** args,
                       size_t args_len,
                       void** payload,
                       size_t payload_len) {
    int rc;
    struct spi_shm_hdr* shm_hdr;

    assert(args || !args_len);
    assert(payload || !payload_len);
    assert(args_len <= SPI_CMD_SHM_ARGS_MAX_SIZE);

    if (!is_initialized(dev)) {
        rc = ERR_BAD_HANDLE;
        goto err_init;
    }
    if (dev->config_err) {
        rc = ERR_BAD_STATE;
        goto err_config;
    }
    if (dev->num_cmds >= dev->max_num_cmds) {
        rc = ERR_OUT_OF_RANGE;
        goto err_range;
    }

    shm_hdr = mb_advance_pos(&dev->shm, sizeof(*shm_hdr));
    if (!shm_hdr) {
        rc = ERR_TOO_BIG;
        goto err_shm_hdr;
    }
    WRITE_ONCE(shm_hdr->cmd, cmd);
    WRITE_ONCE(shm_hdr->status, 0);

    if (args) {
        *args = mb_advance_pos(&dev->shm, args_len);
        if (!*args) {
            rc = ERR_TOO_BIG;
            goto err_args;
        }
    }
    if (payload) {
        assert(dev->total_payload <= dev->max_total_payload);
        if (payload_len > dev->max_total_payload - dev->total_payload) {
            rc = ERR_TOO_BIG;
            goto err_payload;
        }
        dev->total_payload += payload_len;

        *payload = mb_advance_pos(&dev->shm, payload_len);
        assert(*payload);
    }

    dev->num_cmds++;
    return NO_ERROR;

err_payload:
    /* guard against payload-only commands, which carry no args */
    if (args) {
        *args = NULL;
    }
err_args:
err_shm_hdr:
err_range:
    dev->config_err = true;
err_config:
err_init:
    return rc;
}

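/*
 * Sketch of the shared memory layout that spi_add_cmd() builds up (alignment
 * padding inserted by mb_advance_pos() omitted):
 *
 *   +-------------+---------+-----------+-------------+-----+
 *   | spi_shm_hdr | args    | payload   | spi_shm_hdr | ... |
 *   | (cmd 0)     | (cmd 0) | (cmd 0)   | (cmd 1)     |     |
 *   +-------------+---------+-----------+-------------+-----+
 *
 * The server writes each response back into the same buffer, which is why
 * validate_batch_resp() walks the identical layout when checking results.
 */
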
int spi_add_data_xfer_cmd(struct spi_dev* dev,
                          void** tx,
                          void** rx,
                          size_t len) {
    int rc;
    struct spi_xfer_args* args;
    uint32_t flags;
    void* payload;

    rc = spi_add_cmd(dev, SPI_CMD_SHM_OP_XFER, (void**)&args, sizeof(*args),
                     &payload, len);
    if (rc != NO_ERROR) {
        return rc;
    }

    flags = (tx ? SPI_XFER_FLAGS_TX : 0) | (rx ? SPI_XFER_FLAGS_RX : 0);
    WRITE_ONCE(args->len, len);
    WRITE_ONCE(args->flags, flags);

    if (tx) {
        *tx = payload;
    }
    if (rx) {
        *rx = payload;
    }

    return NO_ERROR;
}

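/*
 * Usage sketch for a full-duplex transfer (illustrative only). Note that *tx
 * and *rx alias the same shared memory region, so TX data must be staged
 * before spi_exec_cmds() and RX data copied out afterwards, before the next
 * spi_add_*() call reuses the buffer:
 *
 *   void* tx;
 *   void* rx;
 *   uint8_t rsp[16];
 *
 *   int rc = spi_add_data_xfer_cmd(dev, &tx, &rx, sizeof(rsp));
 *   if (rc == NO_ERROR) {
 *       memset(tx, 0xff, sizeof(rsp));    // stage TX bytes
 *       rc = spi_exec_cmds(dev, NULL);    // run the batch
 *       if (rc == NO_ERROR) {
 *           memcpy(rsp, rx, sizeof(rsp)); // copy RX bytes out
 *       }
 *   }
 */
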
int spi_add_cs_assert_cmd(struct spi_dev* dev) {
    return spi_add_cmd(dev, SPI_CMD_SHM_OP_CS_ASSERT, NULL, 0, NULL, 0);
}

int spi_add_cs_deassert_cmd(struct spi_dev* dev) {
    return spi_add_cmd(dev, SPI_CMD_SHM_OP_CS_DEASSERT, NULL, 0, NULL, 0);
}

int spi_add_set_clk_cmd(struct spi_dev* dev,
                        uint64_t clk_hz_in,
                        uint64_t** clk_hz_out) {
    int rc;
    struct spi_clk_args* args;

    rc = spi_add_cmd(dev, SPI_CMD_SHM_OP_SET_CLK, (void**)&args, sizeof(*args),
                     NULL, 0);
    if (rc != NO_ERROR) {
        return rc;
    }

    WRITE_ONCE(args->clk_hz, clk_hz_in);

    if (clk_hz_out) {
        *clk_hz_out = &args->clk_hz;
    }

    return NO_ERROR;
}

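/*
 * Usage sketch (illustrative only): clk_hz_out lets the caller read back the
 * clock rate written into shared memory after the command executes, since
 * hardware may not support the exact requested rate. The pointer targets
 * shared memory and is only meaningful after spi_exec_cmds() succeeds:
 *
 *   uint64_t* actual_hz;
 *
 *   int rc = spi_add_set_clk_cmd(dev, 1000000, &actual_hz);
 *   if (rc == NO_ERROR) {
 *       rc = spi_exec_cmds(dev, NULL);
 *       if (rc == NO_ERROR) {
 *           TLOGD("SPI clock: %llu Hz\n", (unsigned long long)*actual_hz);
 *       }
 *   }
 */
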
int spi_add_delay_cmd(struct spi_dev* dev, uint64_t delay_ns) {
    int rc;
    struct spi_delay_args* args;

    rc = spi_add_cmd(dev, SPI_CMD_SHM_OP_DELAY, (void**)&args, sizeof(*args),
                     NULL, 0);
    if (rc != NO_ERROR) {
        return rc;
    }

    WRITE_ONCE(args->delay_ns, delay_ns);

    return NO_ERROR;
}