Branch data Line data Source code
1 : : /* SPDX-License-Identifier: GPL-2.0 */
2 : : #ifndef ASMARM_DMA_MAPPING_H
3 : : #define ASMARM_DMA_MAPPING_H
4 : :
5 : : #ifdef __KERNEL__
6 : :
7 : : #include <linux/mm_types.h>
8 : : #include <linux/scatterlist.h>
9 : : #include <linux/dma-debug.h>
10 : :
11 : : #include <asm/memory.h>
12 : :
13 : : #include <xen/xen.h>
14 : : #include <asm/xen/hypervisor.h>
15 : :
16 : : extern const struct dma_map_ops arm_dma_ops;
17 : : extern const struct dma_map_ops arm_coherent_dma_ops;
18 : :
19 : : static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
20 : : {
21 : : if (IS_ENABLED(CONFIG_MMU) && !IS_ENABLED(CONFIG_ARM_LPAE))
22 : : return &arm_dma_ops;
23 : : return NULL;
24 : : }
25 : :
26 : : #ifdef __arch_page_to_dma
27 : : #error Please update to __arch_pfn_to_dma
28 : : #endif
29 : :
30 : : /*
31 : : * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
32 : : * functions used internally by the DMA-mapping API to provide DMA
33 : : * addresses. They must not be used by drivers.
34 : : */
35 : : #ifndef __arch_pfn_to_dma
36 : : static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
37 : : {
38 [ + - # # + - # # # # # # # # # ] : 3300798 : if (dev)
39 : 3300798 : pfn -= dev->dma_pfn_offset;
40 : 3300798 : return (dma_addr_t)__pfn_to_bus(pfn);
41 : : }
42 : :
43 : : static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
44 : : {
45 : 3384819 : unsigned long pfn = __bus_to_pfn(addr);
46 : :
47 [ # # + - # # + - # # # # # # # # + - ] : 3384819 : if (dev)
48 : 3384819 : pfn += dev->dma_pfn_offset;
49 : :
50 : : return pfn;
51 : : }
52 : :
53 : : static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
54 : : {
55 : : if (dev) {
56 : : unsigned long pfn = dma_to_pfn(dev, addr);
57 : :
58 : : return phys_to_virt(__pfn_to_phys(pfn));
59 : : }
60 : :
61 : : return (void *)__bus_to_virt((unsigned long)addr);
62 : : }
63 : :
64 : : static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
65 : : {
66 : : if (dev)
67 : : return pfn_to_dma(dev, virt_to_pfn(addr));
68 : :
69 : : return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
70 : : }
71 : :
72 : : #else
73 : : static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
74 : : {
75 : : return __arch_pfn_to_dma(dev, pfn);
76 : : }
77 : :
78 : : static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
79 : : {
80 : : return __arch_dma_to_pfn(dev, addr);
81 : : }
82 : :
83 : : static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
84 : : {
85 : : return __arch_dma_to_virt(dev, addr);
86 : : }
87 : :
88 : : static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
89 : : {
90 : : return __arch_virt_to_dma(dev, addr);
91 : : }
92 : : #endif
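/*
 * Editorial sketch, not part of the original header: an illustrative,
 * arch-internal helper showing that pfn_to_dma() and dma_to_pfn() are
 * inverses for a given device once dev->dma_pfn_offset has been applied.
 * The helper name is invented for illustration; drivers must never call
 * these private functions directly.
 */
static inline bool __dma_pfn_roundtrip_ok(struct device *dev, unsigned long pfn)
{
	dma_addr_t addr = pfn_to_dma(dev, pfn);

	return dma_to_pfn(dev, addr) == pfn;
}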
93 : :
94 : : /**
95 : : * arm_dma_alloc - allocate consistent memory for DMA
96 : : * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
97 : : * @size: required memory size
98 : : * @handle: bus-specific DMA address
99 : : * @attrs: optional attributes that specify mapping properties
100 : : *
101 : : * Allocate some memory for a device for performing DMA. This function
102 : : * allocates pages, and will return the CPU-viewed address, and sets @handle
103 : : * to be the device-viewed address.
104 : : */
105 : : extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
106 : : gfp_t gfp, unsigned long attrs);
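/*
 * Editorial sketch, not part of the original header: a hypothetical driver
 * probe path obtaining a coherent buffer.  Drivers do not call
 * arm_dma_alloc() directly; they call the generic dma_alloc_coherent(),
 * which dispatches to arm_dma_alloc() through arm_dma_ops.  struct foo,
 * foo_probe() and FOO_BUF_SIZE are invented for illustration.
 */
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

#define FOO_BUF_SIZE	4096

struct foo {
	struct device	*dev;
	void		*buf;		/* CPU-view address */
	dma_addr_t	buf_dma;	/* device-view address */
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo *foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return -ENOMEM;

	foo->dev = &pdev->dev;
	foo->buf = dma_alloc_coherent(foo->dev, FOO_BUF_SIZE,
				      &foo->buf_dma, GFP_KERNEL);
	if (!foo->buf)
		return -ENOMEM;

	platform_set_drvdata(pdev, foo);
	return 0;
}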
107 : :
108 : : /**
109 : : * arm_dma_free - free memory allocated by arm_dma_alloc
110 : : * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
111 : : * @size: size of memory originally requested in dma_alloc_coherent
112 : : * @cpu_addr: CPU-view address returned from dma_alloc_coherent
113 : : * @handle: device-view address returned from dma_alloc_coherent
114 : : * @attrs: optional attributes that specify mapping properties
115 : : *
116 : : * Free (and unmap) a DMA buffer previously allocated by
117 : : * arm_dma_alloc().
118 : : *
119 : : * References to the memory and mappings associated with cpu_addr/handle
120 : : * are illegal during and after execution of this call.
121 : : */
122 : : extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
123 : : dma_addr_t handle, unsigned long attrs);
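/*
 * Editorial sketch, not part of the original header: the matching release
 * path for the hypothetical driver above.  dma_free_coherent() reaches
 * arm_dma_free() through arm_dma_ops; the size, CPU address and DMA handle
 * must be exactly those returned by the original allocation.
 */
static int foo_remove(struct platform_device *pdev)
{
	struct foo *foo = platform_get_drvdata(pdev);

	/* no CPU or device access to the buffer is allowed after this call */
	dma_free_coherent(foo->dev, FOO_BUF_SIZE, foo->buf, foo->buf_dma);
	return 0;
}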
124 : :
125 : : /**
126 : : * arm_dma_mmap - map a coherent DMA allocation into user space
127 : : * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
128 : : * @vma: vm_area_struct describing requested user mapping
129 : : * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
130 : : * @handle: device-view address returned from dma_alloc_coherent
131 : : * @size: size of memory originally requested in dma_alloc_coherent
132 : : * @attrs: optional attributes that specify mapping properties
133 : : *
134 : : * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
135 : : * into user space. The coherent DMA buffer must not be freed by the
136 : : * driver until the user space mapping has been released.
137 : : */
138 : : extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
139 : : void *cpu_addr, dma_addr_t dma_addr, size_t size,
140 : : unsigned long attrs);
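/*
 * Editorial sketch, not part of the original header: exporting the
 * hypothetical coherent buffer above to user space.  The driver-facing
 * entry point is dma_mmap_coherent(), which ends up in arm_dma_mmap();
 * foo_mmap() is invented for illustration.  The buffer must not be freed
 * until the user space mapping has been released.
 */
static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo *foo = file->private_data;

	return dma_mmap_coherent(foo->dev, vma, foo->buf, foo->buf_dma,
				 FOO_BUF_SIZE);
}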
141 : :
142 : : /*
143 : : * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
144 : : * and utilize bounce buffers as needed to work around limited DMA windows.
145 : : *
146 : : * On the SA-1111, a bug limits DMA to only certain regions of RAM.
147 : : * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
148 : : * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
149 : : *
150 : : * The following are helper functions used by the dmabounce subsystem.
151 : : *
152 : : */
153 : :
154 : : /**
155 : : * dmabounce_register_dev
156 : : *
157 : : * @dev: valid struct device pointer
158 : : * @small_buf_size: size of buffers to use with small buffer pool
159 : : * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
160 : : * @needs_bounce_fn: called to determine whether buffer needs bouncing
161 : : *
162 : : * This function should be called by low-level platform code to register
163 : : * a device as requiring DMA buffer bouncing. The function will allocate
164 : : * appropriate DMA pools for the device.
165 : : */
166 : : extern int dmabounce_register_dev(struct device *, unsigned long,
167 : : unsigned long, int (*)(struct device *, dma_addr_t, size_t));
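/*
 * Editorial sketch, not part of the original header: hypothetical platform
 * code registering a device with dmabounce.  The needs_bounce callback
 * decides, per transfer, whether the buffer lies outside the window the
 * device can reach; the 64MB limit, pool sizes and names are invented for
 * illustration.
 */
static int foo_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	/* bounce any buffer that ends beyond the device's 64MB DMA window */
	return (addr + size) > (64 * 1024 * 1024);
}

static int foo_platform_init(struct device *dev)
{
	/* 512-byte small-buffer pool, 4KB large-buffer pool */
	return dmabounce_register_dev(dev, 512, 4096, foo_needs_bounce);
}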
168 : :
169 : : /**
170 : : * dmabounce_unregister_dev
171 : : *
172 : : * @dev: valid struct device pointer
173 : : *
174 : : * This function should be called by low-level platform code when a device
175 : : * that was previously registered with dmabounce_register_dev is removed
176 : : * from the system.
177 : : *
178 : : */
179 : : extern void dmabounce_unregister_dev(struct device *);
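/*
 * Editorial sketch, not part of the original header: the corresponding
 * teardown for the hypothetical platform code above, called when the
 * bounced device is removed from the system.
 */
static void foo_platform_exit(struct device *dev)
{
	dmabounce_unregister_dev(dev);
}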
180 : :
181 : :
182 : :
183 : : /*
184 : : * The scatter list versions of the above methods.
185 : : */
186 : : extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
187 : : enum dma_data_direction, unsigned long attrs);
188 : : extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
189 : : enum dma_data_direction, unsigned long attrs);
190 : : extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
191 : : enum dma_data_direction);
192 : : extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
193 : : enum dma_data_direction);
194 : : extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
195 : : void *cpu_addr, dma_addr_t dma_addr, size_t size,
196 : : unsigned long attrs);
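/*
 * Editorial sketch, not part of the original header: a hypothetical driver
 * mapping a scatter-gather table for a transfer to the device.  The generic
 * dma_map_sg()/dma_unmap_sg() calls dispatch to the arm_dma_*_sg helpers
 * above when arm_dma_ops is in use; foo_sg_transfer() and the DMA_TO_DEVICE
 * direction are assumptions for illustration.
 */
static int foo_sg_transfer(struct device *dev, struct sg_table *sgt)
{
	int nents;

	nents = dma_map_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);
	if (!nents)
		return -EIO;

	/* ... program the device with the nents mapped segments ... */

	dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, DMA_TO_DEVICE);
	return 0;
}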
197 : :
198 : : #endif /* __KERNEL__ */
199 : : #endif