// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH	8
/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
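
/*
 * Example (illustrative sketch, not taken from an in-tree driver): a
 * hypothetical controller driver pairing the two helpers above around a DMA
 * transfer in its ->exec_op() hook. The foo_* names are assumptions.
 *
 *	static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_issue_dma(ctlr, op, &sgt);	// hypothetical helper
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *
 *		return ret;
 *	}
 */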

static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx &&
		     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx &&
		     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
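
/*
 * Example (illustrative sketch): a controller with extra restrictions, say a
 * 3-byte address limit, can layer its own check on top of the default helper
 * from its ->supports_op() hook. foo_supports_op() is a hypothetical name.
 *
 *	static bool foo_supports_op(struct spi_mem *mem,
 *				    const struct spi_mem_op *op)
 *	{
 *		// Reject what the hardware can't do, ...
 *		if (op->addr.nbytes > 3)
 *			return false;
 *
 *		// ... then let the core validate the buswidths.
 *		return spi_mem_default_supports_op(mem, op);
 *	}
 */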

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual IO, others might only support
 * specific opcodes, and it can even happen that the controller and device
 * both support Quad IO but the hardware prevents you from using it because
 * only 2 IO lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
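
/*
 * Example (illustrative sketch): probing whether a 1-1-4 fast read
 * (opcode 0x6b, a common but device-specific choice) is usable before
 * selecting a read path. The SPI_MEM_OP* macros come from
 * <linux/spi/spi-mem.h>; buf and len are caller-provided.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, 0, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (spi_mem_supports_op(mem, &op))
 *		// quad read is usable on this controller/device pair
 */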

static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -ENOTSUPP;

	if (ctlr->mem_ops) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->exec_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}

	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
		     op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = sizeof(op->cmd.opcode);
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
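
/*
 * Example (illustrative sketch): reading a 3-byte JEDEC ID (opcode 0x9f)
 * with a single-wire command phase and no address or dummy cycles. The
 * buffer is kmalloc'ed because op->data.buf.in must be DMA-able when the
 * generic spi_sync() fallback above is taken.
 *
 *	u8 *id = kmalloc(3, GFP_KERNEL);
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *					  SPI_MEM_OP_NO_ADDR,
 *					  SPI_MEM_OP_NO_DUMMY,
 *					  SPI_MEM_OP_DATA_IN(3, id, 1));
 *	int ret;
 *
 *	if (!id)
 *		return -ENOMEM;
 *
 *	ret = spi_mem_exec_op(mem, &op);
 */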

/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
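
/*
 * Example (illustrative sketch): because adjust_op_size() may shrink
 * op.data.nbytes, callers typically loop until the whole transfer is done.
 * op is a pre-filled read template; base, done and remaining are hypothetical
 * bookkeeping variables owned by the caller.
 *
 *	while (remaining) {
 *		op.addr.val = base + done;
 *		op.data.buf.in = buf + done;
 *		op.data.nbytes = remaining;
 *
 *		ret = spi_mem_adjust_op_size(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		done += op.data.nbytes;
 *		remaining -= op.data.nbytes;
 *	}
 */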

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on their own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the address length is between 1 and 8 bytes. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -ENOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
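
/*
 * Example (illustrative sketch): creating a read direct mapping over a
 * hypothetical 16 MiB flash. The address value and data length in the
 * template are left at 0/NULL; they are filled in per access. SZ_16M is
 * from <linux/sizes.h>; the 0x6b quad read opcode is an assumption.
 *
 *	struct spi_mem_dirmap_info info = {
 *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *				      SPI_MEM_OP_ADDR(3, 0, 1),
 *				      SPI_MEM_OP_DUMMY(1, 1),
 *				      SPI_MEM_OP_DATA_IN(0, NULL, 4)),
 *		.offset = 0,
 *		.length = SZ_16M,
 *	};
 *	struct spi_mem_dirmap_desc *desc = spi_mem_dirmap_create(mem, &info);
 *
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */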

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(mem, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
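
/*
 * Example (illustrative sketch): since spi_mem_dirmap_read() may return less
 * than @len, callers typically loop until everything has been read; the
 * write side below follows the same pattern.
 *
 *	while (len) {
 *		ssize_t ret = spi_mem_dirmap_read(desc, offs, len, buf);
 *
 *		if (ret < 0)
 *			return ret;
 *		if (!ret)
 *			return -EIO;	// no forward progress
 *
 *		offs += ret;
 *		buf += ret;
 *		len -= ret;
 *	}
 */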

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return PTR_ERR(mem->name);

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
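
/*
 * Example (illustrative sketch): a minimal SPI memory driver registration.
 * Drivers normally go through the spi_mem_driver_register() wrapper or the
 * module_spi_mem_driver() helper from <linux/spi/spi-mem.h>, both of which
 * pass THIS_MODULE as the owner. The foo_* symbols are assumptions.
 *
 *	static struct spi_mem_driver foo_mem_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "foo-mem",
 *			},
 *		},
 *		.probe = foo_mem_probe,
 *		.remove = foo_mem_remove,
 *	};
 *	module_spi_mem_driver(foo_mem_driver);
 */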

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);