/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
 *                  Martin Schwidefsky <schwidefsky@de.ibm.com>
 * Bugreports.to..: <Linux390@de.ibm.com>
 * Copyright IBM Corp. 2000
 *
 * History of changes
 * 07/24/00 new file
 * 05/04/02 code restructuring.
 */

#ifndef _S390_IDALS_H
#define _S390_IDALS_H

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <asm/dma-types.h>
#include <asm/cio.h>

#define IDA_SIZE_SHIFT        12
#define IDA_BLOCK_SIZE        (1UL << IDA_SIZE_SHIFT)

#define IDA_2K_SIZE_SHIFT     11
#define IDA_2K_BLOCK_SIZE     (1UL << IDA_2K_SIZE_SHIFT)

/*
 * Test if an address/length pair needs an idal list.
 */
static inline bool idal_is_needed(void *vaddr, unsigned int length)
{
        dma64_t paddr = virt_to_dma64(vaddr);

        return (((__force unsigned long)(paddr) + length - 1) >> 31) != 0;
}

/*
 * Return the number of idal words needed for an address/length pair.
 */
static inline unsigned int idal_nr_words(void *vaddr, unsigned int length)
{
        unsigned int cidaw;

        cidaw = (unsigned long)vaddr & (IDA_BLOCK_SIZE - 1);
        cidaw += length + IDA_BLOCK_SIZE - 1;
        cidaw >>= IDA_SIZE_SHIFT;
        return cidaw;
}

/*
 * Return the number of 2K IDA words needed for an address/length pair.
 */
static inline unsigned int idal_2k_nr_words(void *vaddr, unsigned int length)
{
        unsigned int cidaw;

        cidaw = (unsigned long)vaddr & (IDA_2K_BLOCK_SIZE - 1);
        cidaw += length + IDA_2K_BLOCK_SIZE - 1;
        cidaw >>= IDA_2K_SIZE_SHIFT;
        return cidaw;
}

/*
 * Create the list of idal words for an address/length pair.
 */
static inline dma64_t *idal_create_words(dma64_t *idaws, void *vaddr, unsigned int length)
{
        dma64_t paddr = virt_to_dma64(vaddr);
        unsigned int cidaw;

        *idaws++ = paddr;
        cidaw = idal_nr_words(vaddr, length);
        paddr = dma64_and(paddr, -IDA_BLOCK_SIZE);
        while (--cidaw > 0) {
                paddr = dma64_add(paddr, IDA_BLOCK_SIZE);
                *idaws++ = paddr;
        }
        return idaws;
}

/*
 * Sets the address of the data in the CCW.
 * If necessary, it allocates an IDAL and sets the appropriate flags.
 */
static inline int set_normalized_cda(struct ccw1 *ccw, void *vaddr)
{
        unsigned int nridaws;
        dma64_t *idal;

        if (ccw->flags & CCW_FLAG_IDA)
                return -EINVAL;
        nridaws = idal_nr_words(vaddr, ccw->count);
        if (nridaws > 0) {
                idal = kcalloc(nridaws, sizeof(*idal), GFP_ATOMIC | GFP_DMA);
                if (!idal)
                        return -ENOMEM;
                idal_create_words(idal, vaddr, ccw->count);
                ccw->flags |= CCW_FLAG_IDA;
                vaddr = idal;
        }
        ccw->cda = virt_to_dma32(vaddr);
        return 0;
}
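
/*
 * Usage sketch (illustrative only): pointing a read CCW at a kernel buffer
 * that may lie above 2 GB. The opcode "dev_read_opcode", the buffer "buf"
 * and the length "len" are hypothetical placeholders; set_normalized_cda()
 * may allocate an IDAL behind the scenes, and clear_normalized_cda()
 * (below) releases it again.
 *
 *	struct ccw1 ccw;
 *	int rc;
 *
 *	memset(&ccw, 0, sizeof(ccw));
 *	ccw.cmd_code = dev_read_opcode;
 *	ccw.flags = CCW_FLAG_SLI;
 *	ccw.count = len;
 *	rc = set_normalized_cda(&ccw, buf);
 *	if (rc)
 *		return rc;
 *	// ... start the channel program and wait for completion ...
 *	clear_normalized_cda(&ccw);
 */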

/*
 * Releases any allocated IDAL related to the CCW.
 */
static inline void clear_normalized_cda(struct ccw1 *ccw)
{
        if (ccw->flags & CCW_FLAG_IDA) {
                kfree(dma32_to_virt(ccw->cda));
                ccw->flags &= ~CCW_FLAG_IDA;
        }
        ccw->cda = 0;
}

/*
 * Idal buffer extension
 */
struct idal_buffer {
        size_t size;
        size_t page_order;
        dma64_t data[];
};

/*
 * Allocate an idal buffer
 */
static inline struct idal_buffer *idal_buffer_alloc(size_t size, int page_order)
{
        int nr_chunks, nr_ptrs, i;
        struct idal_buffer *ib;
        void *vaddr;

        nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_SHIFT;
        nr_chunks = (PAGE_SIZE << page_order) >> IDA_SIZE_SHIFT;
        ib = kmalloc(struct_size(ib, data, nr_ptrs), GFP_DMA | GFP_KERNEL);
        if (!ib)
                return ERR_PTR(-ENOMEM);
        ib->size = size;
        ib->page_order = page_order;
        for (i = 0; i < nr_ptrs; i++) {
                if (i & (nr_chunks - 1)) {
                        ib->data[i] = dma64_add(ib->data[i - 1], IDA_BLOCK_SIZE);
                        continue;
                }
                vaddr = (void *)__get_free_pages(GFP_KERNEL, page_order);
                if (!vaddr)
                        goto error;
                ib->data[i] = virt_to_dma64(vaddr);
        }
        return ib;
error:
        while (i >= nr_chunks) {
                i -= nr_chunks;
                vaddr = dma64_to_virt(ib->data[i]);
                free_pages((unsigned long)vaddr, ib->page_order);
        }
        kfree(ib);
        return ERR_PTR(-ENOMEM);
}

/*
 * Free an idal buffer.
 */
static inline void idal_buffer_free(struct idal_buffer *ib)
{
        int nr_chunks, nr_ptrs, i;
        void *vaddr;

        nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_SHIFT;
        nr_chunks = (PAGE_SIZE << ib->page_order) >> IDA_SIZE_SHIFT;
        for (i = 0; i < nr_ptrs; i += nr_chunks) {
                vaddr = dma64_to_virt(ib->data[i]);
                free_pages((unsigned long)vaddr, ib->page_order);
        }
        kfree(ib);
}

/*
 * Test if an idal list is really needed.
 */
static inline bool __idal_buffer_is_needed(struct idal_buffer *ib)
{
        if (ib->size > (PAGE_SIZE << ib->page_order))
                return true;
        return idal_is_needed(dma64_to_virt(ib->data[0]), ib->size);
}

/*
 * Set channel data address to idal buffer.
 */
static inline void idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
{
        void *vaddr;

        if (__idal_buffer_is_needed(ib)) {
                /* Setup idals */
                ccw->cda = virt_to_dma32(ib->data);
                ccw->flags |= CCW_FLAG_IDA;
        } else {
                /*
                 * No idals needed - use direct addressing. Convert from
                 * dma64_t to virt and then to dma32_t only because of type
                 * checking. The physical address is known to be below 2GB.
                 */
                vaddr = dma64_to_virt(ib->data[0]);
                ccw->cda = virt_to_dma32(vaddr);
        }
        ccw->count = ib->size;
}
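
/*
 * Usage sketch (illustrative only): typical life cycle of an idal buffer
 * for a transfer of "size" bytes backed by order-0 (4 KB) chunks. "size"
 * and the device specific opcode "dev_opcode" are hypothetical
 * placeholders; idal_buffer_set_cda() fills in cda, count and, if
 * required, the IDA flag.
 *
 *	struct idal_buffer *ib;
 *	struct ccw1 ccw;
 *
 *	ib = idal_buffer_alloc(size, 0);
 *	if (IS_ERR(ib))
 *		return PTR_ERR(ib);
 *	memset(&ccw, 0, sizeof(ccw));
 *	ccw.cmd_code = dev_opcode;
 *	idal_buffer_set_cda(ib, &ccw);
 *	// ... run the channel program ...
 *	idal_buffer_free(ib);
 */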

/*
 * Copy count bytes from an idal buffer to user memory
 */
static inline size_t idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
{
        size_t left;
        void *vaddr;
        int i;

        BUG_ON(count > ib->size);
        for (i = 0; count > IDA_BLOCK_SIZE; i++) {
                vaddr = dma64_to_virt(ib->data[i]);
                left = copy_to_user(to, vaddr, IDA_BLOCK_SIZE);
                if (left)
                        return left + count - IDA_BLOCK_SIZE;
                to = (void __user *)to + IDA_BLOCK_SIZE;
                count -= IDA_BLOCK_SIZE;
        }
        vaddr = dma64_to_virt(ib->data[i]);
        return copy_to_user(to, vaddr, count);
}

/*
 * Copy count bytes from user memory to an idal buffer
 */
static inline size_t idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
{
        size_t left;
        void *vaddr;
        int i;

        BUG_ON(count > ib->size);
        for (i = 0; count > IDA_BLOCK_SIZE; i++) {
                vaddr = dma64_to_virt(ib->data[i]);
                left = copy_from_user(vaddr, from, IDA_BLOCK_SIZE);
                if (left)
                        return left + count - IDA_BLOCK_SIZE;
                from = (void __user *)from + IDA_BLOCK_SIZE;
                count -= IDA_BLOCK_SIZE;
        }
        vaddr = dma64_to_virt(ib->data[i]);
        return copy_from_user(vaddr, from, count);
}

#endif
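
/*
 * Usage sketch (illustrative only): staging data between user space and an
 * idal buffer with the copy helpers above. "ib", "ubuf" and "count" are
 * hypothetical placeholders; count must not exceed ib->size, and a non-zero
 * return value reports bytes that could not be copied.
 *
 *	if (idal_buffer_from_user(ib, ubuf, count))
 *		return -EFAULT;
 *	// ... run the write channel program ...
 *	if (idal_buffer_to_user(ib, ubuf, count))
 *		return -EFAULT;
 */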