// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Symmetric key cipher operations.
 *
 * Generic encrypt/decrypt wrapper for ciphers, handles operations across
 * multiple page boundaries by using temporary blocks.  In user context,
 * the kernel is given a chance to schedule us once per page.
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 */

#include <crypto/internal/aead.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/bug.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <net/netlink.h>
#include "skcipher.h"

#define CRYPTO_ALG_TYPE_SKCIPHER_MASK	0x0000000e

enum {
	SKCIPHER_WALK_PHYS = 1 << 0,
	SKCIPHER_WALK_SLOW = 1 << 1,
	SKCIPHER_WALK_COPY = 1 << 2,
	SKCIPHER_WALK_DIFF = 1 << 3,
	SKCIPHER_WALK_SLEEP = 1 << 4,
};

struct skcipher_walk_buffer {
	struct list_head entry;
	struct scatter_walk dst;
	unsigned int len;
	u8 *data;
	u8 buffer[];
};

static const struct crypto_type crypto_skcipher_type;

static int skcipher_walk_next(struct skcipher_walk *walk);

static inline void skcipher_map_src(struct skcipher_walk *walk)
{
	walk->src.virt.addr = scatterwalk_map(&walk->in);
}

static inline void skcipher_map_dst(struct skcipher_walk *walk)
{
	walk->dst.virt.addr = scatterwalk_map(&walk->out);
}

static inline void skcipher_unmap_src(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->src.virt.addr);
}

static inline void skcipher_unmap_dst(struct skcipher_walk *walk)
{
	scatterwalk_unmap(walk->dst.virt.addr);
}

static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
{
	return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
}

/* Get a spot of the specified length that does not straddle a page.
 * The caller needs to ensure that there is enough space for this operation.
 */
static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

	return max(start, end_page);
}

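/*
 * Worked example (illustrative, not from the original source): with
 * PAGE_SIZE == 4096, a start pointer at page offset 0xff8 with len == 16
 * would straddle the page boundary; end_page rounds (start + len - 1)
 * down to the start of the following page, and that address is returned.
 * With start at offset 0xfe0 and len == 16 the buffer fits in the page,
 * end_page falls below start, and start is returned unchanged.
 */
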
static inline struct skcipher_alg *__crypto_skcipher_alg(
	struct crypto_alg *alg)
{
	return container_of(alg, struct skcipher_alg, base);
}

static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	u8 *addr;

	addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
	addr = skcipher_get_spot(addr, bsize);
	scatterwalk_copychunks(addr, &walk->out, bsize,
			       (walk->flags & SKCIPHER_WALK_PHYS) ? 2 : 1);
	return 0;
}

int skcipher_walk_done(struct skcipher_walk *walk, int err)
{
	unsigned int n = walk->nbytes;
	unsigned int nbytes = 0;

	if (!n)
		goto finish;

	if (likely(err >= 0)) {
		n -= err;
		nbytes = walk->total - n;
	}

	if (likely(!(walk->flags & (SKCIPHER_WALK_PHYS |
				    SKCIPHER_WALK_SLOW |
				    SKCIPHER_WALK_COPY |
				    SKCIPHER_WALK_DIFF)))) {
unmap_src:
		skcipher_unmap_src(walk);
	} else if (walk->flags & SKCIPHER_WALK_DIFF) {
		skcipher_unmap_dst(walk);
		goto unmap_src;
	} else if (walk->flags & SKCIPHER_WALK_COPY) {
		skcipher_map_dst(walk);
		memcpy(walk->dst.virt.addr, walk->page, n);
		skcipher_unmap_dst(walk);
	} else if (unlikely(walk->flags & SKCIPHER_WALK_SLOW)) {
		if (err > 0) {
			/*
			 * Didn't process all bytes.  Either the algorithm is
			 * broken, or this was the last step and it turned out
			 * the message wasn't evenly divisible into blocks but
			 * the algorithm requires it.
			 */
			err = -EINVAL;
			nbytes = 0;
		} else
			n = skcipher_done_slow(walk, n);
	}

	if (err > 0)
		err = 0;

	walk->total = nbytes;
	walk->nbytes = 0;

	scatterwalk_advance(&walk->in, n);
	scatterwalk_advance(&walk->out, n);
	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

	if (nbytes) {
		crypto_yield(walk->flags & SKCIPHER_WALK_SLEEP ?
			     CRYPTO_TFM_REQ_MAY_SLEEP : 0);
		return skcipher_walk_next(walk);
	}

finish:
	/* Short-circuit for the common/fast path. */
	if (!((unsigned long)walk->buffer | (unsigned long)walk->page))
		goto out;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		goto out;

	if (walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

out:
	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_done);

void skcipher_walk_complete(struct skcipher_walk *walk, int err)
{
	struct skcipher_walk_buffer *p, *tmp;

	list_for_each_entry_safe(p, tmp, &walk->buffers, entry) {
		u8 *data;

		if (err)
			goto done;

		data = p->data;
		if (!data) {
			data = PTR_ALIGN(&p->buffer[0], walk->alignmask + 1);
			data = skcipher_get_spot(data, walk->stride);
		}

		scatterwalk_copychunks(data, &p->dst, p->len, 1);

		if (offset_in_page(p->data) + p->len + walk->stride >
		    PAGE_SIZE)
			free_page((unsigned long)p->data);

done:
		list_del(&p->entry);
		kfree(p);
	}

	if (!err && walk->iv != walk->oiv)
		memcpy(walk->oiv, walk->iv, walk->ivsize);
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);
}
EXPORT_SYMBOL_GPL(skcipher_walk_complete);

static void skcipher_queue_write(struct skcipher_walk *walk,
				 struct skcipher_walk_buffer *p)
{
	p->dst = walk->out;
	list_add_tail(&p->entry, &walk->buffers);
}

static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
{
	bool phys = walk->flags & SKCIPHER_WALK_PHYS;
	unsigned alignmask = walk->alignmask;
	struct skcipher_walk_buffer *p;
	unsigned a;
	unsigned n;
	u8 *buffer;
	void *v;

	if (!phys) {
		if (!walk->buffer)
			walk->buffer = walk->page;
		buffer = walk->buffer;
		if (buffer)
			goto ok;
	}

	/* Start with the minimum alignment of kmalloc. */
	a = crypto_tfm_ctx_alignment() - 1;
	n = bsize;

	if (phys) {
		/* Calculate the minimum alignment of p->buffer. */
		a &= (sizeof(*p) ^ (sizeof(*p) - 1)) >> 1;
		n += sizeof(*p);
	}

	/* Minimum size to align p->buffer by alignmask. */
	n += alignmask & ~a;

	/* Minimum size to ensure p->buffer does not straddle a page. */
	n += (bsize - 1) & ~(alignmask | a);

	v = kzalloc(n, skcipher_walk_gfp(walk));
	if (!v)
		return skcipher_walk_done(walk, -ENOMEM);

	if (phys) {
		p = v;
		p->len = bsize;
		skcipher_queue_write(walk, p);
		buffer = p->buffer;
	} else {
		walk->buffer = v;
		buffer = v;
	}

ok:
	walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
	walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
	walk->src.virt.addr = walk->dst.virt.addr;

	scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);

	walk->nbytes = bsize;
	walk->flags |= SKCIPHER_WALK_SLOW;

	return 0;
}

static int skcipher_next_copy(struct skcipher_walk *walk)
{
	struct skcipher_walk_buffer *p;
	u8 *tmp = walk->page;

	skcipher_map_src(walk);
	memcpy(tmp, walk->src.virt.addr, walk->nbytes);
	skcipher_unmap_src(walk);

	walk->src.virt.addr = tmp;
	walk->dst.virt.addr = tmp;

	if (!(walk->flags & SKCIPHER_WALK_PHYS))
		return 0;

	p = kmalloc(sizeof(*p), skcipher_walk_gfp(walk));
	if (!p)
		return -ENOMEM;

	p->data = walk->page;
	p->len = walk->nbytes;
	skcipher_queue_write(walk, p);

	if (offset_in_page(walk->page) + walk->nbytes + walk->stride >
	    PAGE_SIZE)
		walk->page = NULL;
	else
		walk->page += walk->nbytes;

	return 0;
}

static int skcipher_next_fast(struct skcipher_walk *walk)
{
	unsigned long diff;

	walk->src.phys.page = scatterwalk_page(&walk->in);
	walk->src.phys.offset = offset_in_page(walk->in.offset);
	walk->dst.phys.page = scatterwalk_page(&walk->out);
	walk->dst.phys.offset = offset_in_page(walk->out.offset);

	if (walk->flags & SKCIPHER_WALK_PHYS)
		return 0;

	diff = walk->src.phys.offset - walk->dst.phys.offset;
	diff |= walk->src.virt.page - walk->dst.virt.page;

	skcipher_map_src(walk);
	walk->dst.virt.addr = walk->src.virt.addr;

	if (diff) {
		walk->flags |= SKCIPHER_WALK_DIFF;
		skcipher_map_dst(walk);
	}

	return 0;
}

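/*
 * skcipher_walk_next() below picks one of the strategies above for each
 * step: the slow path (small bounce buffer) when less than a full block
 * is contiguous in the scatterlists, the copy path (page-sized bounce
 * buffer) when an offset violates the algorithm's alignmask, and the
 * fast path (direct mapping) otherwise.
 */
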
static int skcipher_walk_next(struct skcipher_walk *walk)
{
	unsigned int bsize;
	unsigned int n;
	int err;

	walk->flags &= ~(SKCIPHER_WALK_SLOW | SKCIPHER_WALK_COPY |
			 SKCIPHER_WALK_DIFF);

	n = walk->total;
	bsize = min(walk->stride, max(n, walk->blocksize));
	n = scatterwalk_clamp(&walk->in, n);
	n = scatterwalk_clamp(&walk->out, n);

	if (unlikely(n < bsize)) {
		if (unlikely(walk->total < walk->blocksize))
			return skcipher_walk_done(walk, -EINVAL);

slow_path:
		err = skcipher_next_slow(walk, bsize);
		goto set_phys_lowmem;
	}

	if (unlikely((walk->in.offset | walk->out.offset) & walk->alignmask)) {
		if (!walk->page) {
			gfp_t gfp = skcipher_walk_gfp(walk);

			walk->page = (void *)__get_free_page(gfp);
			if (!walk->page)
				goto slow_path;
		}

		walk->nbytes = min_t(unsigned, n,
				     PAGE_SIZE - offset_in_page(walk->page));
		walk->flags |= SKCIPHER_WALK_COPY;
		err = skcipher_next_copy(walk);
		goto set_phys_lowmem;
	}

	walk->nbytes = n;

	return skcipher_next_fast(walk);

set_phys_lowmem:
	if (!err && (walk->flags & SKCIPHER_WALK_PHYS)) {
		walk->src.phys.page = virt_to_page(walk->src.virt.addr);
		walk->dst.phys.page = virt_to_page(walk->dst.virt.addr);
		walk->src.phys.offset &= PAGE_SIZE - 1;
		walk->dst.phys.offset &= PAGE_SIZE - 1;
	}
	return err;
}

static int skcipher_copy_iv(struct skcipher_walk *walk)
{
	unsigned a = crypto_tfm_ctx_alignment() - 1;
	unsigned alignmask = walk->alignmask;
	unsigned ivsize = walk->ivsize;
	unsigned bs = walk->stride;
	unsigned aligned_bs;
	unsigned size;
	u8 *iv;

	aligned_bs = ALIGN(bs, alignmask + 1);

	/* Minimum size to align buffer by alignmask. */
	size = alignmask & ~a;

	if (walk->flags & SKCIPHER_WALK_PHYS)
		size += ivsize;
	else {
		size += aligned_bs + ivsize;

		/* Minimum size to ensure buffer does not straddle a page. */
		size += (bs - 1) & ~(alignmask | a);
	}

	walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
	if (!walk->buffer)
		return -ENOMEM;

	iv = PTR_ALIGN(walk->buffer, alignmask + 1);
	iv = skcipher_get_spot(iv, bs) + aligned_bs;

	walk->iv = memcpy(iv, walk->iv, walk->ivsize);
	return 0;
}

static int skcipher_walk_first(struct skcipher_walk *walk)
{
	if (WARN_ON_ONCE(in_hardirq()))
		return -EDEADLK;

	walk->buffer = NULL;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = skcipher_copy_iv(walk);
		if (err)
			return err;
	}

	walk->page = NULL;

	return skcipher_walk_next(walk);
}

static int skcipher_walk_skcipher(struct skcipher_walk *walk,
				  struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	walk->total = req->cryptlen;
	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	walk->flags &= ~SKCIPHER_WALK_SLEEP;
	walk->flags |= req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
		       SKCIPHER_WALK_SLEEP : 0;

	walk->blocksize = crypto_skcipher_blocksize(tfm);
	walk->ivsize = crypto_skcipher_ivsize(tfm);
	walk->alignmask = crypto_skcipher_alignmask(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		walk->stride = alg->co.chunksize;
	else
		walk->stride = alg->walksize;

	return skcipher_walk_first(walk);
}

int skcipher_walk_virt(struct skcipher_walk *walk,
		       struct skcipher_request *req, bool atomic)
{
	int err;

	might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	err = skcipher_walk_skcipher(walk, req);

	walk->flags &= atomic ? ~SKCIPHER_WALK_SLEEP : ~0;

	return err;
}
EXPORT_SYMBOL_GPL(skcipher_walk_virt);

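/*
 * Sketch of the typical calling pattern (illustrative, not part of this
 * file): a synchronous skcipher implementation's ->encrypt() walks the
 * request and hands back to skcipher_walk_done() the number of bytes it
 * left unprocessed in each step.  my_cipher_crypt(), ctx and bsize are
 * placeholders for the algorithm's own cipher kernel, context and block
 * size.
 *
 *	struct skcipher_walk walk;
 *	unsigned int nbytes;
 *	int err;
 *
 *	err = skcipher_walk_virt(&walk, req, false);
 *	while ((nbytes = walk.nbytes) != 0) {
 *		unsigned int n = nbytes - (nbytes % bsize);
 *
 *		my_cipher_crypt(ctx, walk.dst.virt.addr,
 *				walk.src.virt.addr, n, walk.iv);
 *		err = skcipher_walk_done(&walk, nbytes - n);
 *	}
 *	return err;
 */
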
int skcipher_walk_async(struct skcipher_walk *walk,
			struct skcipher_request *req)
{
	walk->flags |= SKCIPHER_WALK_PHYS;

	INIT_LIST_HEAD(&walk->buffers);

	return skcipher_walk_skcipher(walk, req);
}
EXPORT_SYMBOL_GPL(skcipher_walk_async);

static int skcipher_walk_aead_common(struct skcipher_walk *walk,
				     struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	int err;

	walk->nbytes = 0;
	walk->iv = req->iv;
	walk->oiv = req->iv;

	if (unlikely(!walk->total))
		return 0;

	walk->flags &= ~SKCIPHER_WALK_PHYS;

	scatterwalk_start(&walk->in, req->src);
	scatterwalk_start(&walk->out, req->dst);

	scatterwalk_copychunks(NULL, &walk->in, req->assoclen, 2);
	scatterwalk_copychunks(NULL, &walk->out, req->assoclen, 2);

	scatterwalk_done(&walk->in, 0, walk->total);
	scatterwalk_done(&walk->out, 0, walk->total);

	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP)
		walk->flags |= SKCIPHER_WALK_SLEEP;
	else
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	walk->blocksize = crypto_aead_blocksize(tfm);
	walk->stride = crypto_aead_chunksize(tfm);
	walk->ivsize = crypto_aead_ivsize(tfm);
	walk->alignmask = crypto_aead_alignmask(tfm);

	err = skcipher_walk_first(walk);

	if (atomic)
		walk->flags &= ~SKCIPHER_WALK_SLEEP;

	return err;
}

int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	walk->total = req->cryptlen;

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_encrypt);

int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
			       struct aead_request *req, bool atomic)
{
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);

	walk->total = req->cryptlen - crypto_aead_authsize(tfm);

	return skcipher_walk_aead_common(walk, req, atomic);
}
EXPORT_SYMBOL_GPL(skcipher_walk_aead_decrypt);

static void skcipher_set_needkey(struct crypto_skcipher *tfm)
{
	if (crypto_skcipher_max_keysize(tfm) != 0)
		crypto_skcipher_set_flags(tfm, CRYPTO_TFM_NEED_KEY);
}

static int skcipher_setkey_unaligned(struct crypto_skcipher *tfm,
				     const u8 *key, unsigned int keylen)
{
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	u8 *buffer, *alignbuffer;
	unsigned long absize;
	int ret;

	absize = keylen + alignmask;
	buffer = kmalloc(absize, GFP_ATOMIC);
	if (!buffer)
		return -ENOMEM;

	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
	memcpy(alignbuffer, key, keylen);
	ret = cipher->setkey(tfm, alignbuffer, keylen);
	kfree_sensitive(buffer);
	return ret;
}

int crypto_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
			   unsigned int keylen)
{
	struct skcipher_alg *cipher = crypto_skcipher_alg(tfm);
	unsigned long alignmask = crypto_skcipher_alignmask(tfm);
	int err;

	if (cipher->co.base.cra_type != &crypto_skcipher_type) {
		struct crypto_lskcipher **ctx = crypto_skcipher_ctx(tfm);

		crypto_lskcipher_clear_flags(*ctx, CRYPTO_TFM_REQ_MASK);
		crypto_lskcipher_set_flags(*ctx,
					   crypto_skcipher_get_flags(tfm) &
					   CRYPTO_TFM_REQ_MASK);
		err = crypto_lskcipher_setkey(*ctx, key, keylen);
		goto out;
	}

	if (keylen < cipher->min_keysize || keylen > cipher->max_keysize)
		return -EINVAL;

	if ((unsigned long)key & alignmask)
		err = skcipher_setkey_unaligned(tfm, key, keylen);
	else
		err = cipher->setkey(tfm, key, keylen);

out:
	if (unlikely(err)) {
		skcipher_set_needkey(tfm);
		return err;
	}

	crypto_skcipher_clear_flags(tfm, CRYPTO_TFM_NEED_KEY);
	return 0;
}
EXPORT_SYMBOL_GPL(crypto_skcipher_setkey);

int crypto_skcipher_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_encrypt_sg(req);
	return alg->encrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_encrypt);

int crypto_skcipher_decrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (crypto_skcipher_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;
	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_decrypt_sg(req);
	return alg->decrypt(req);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_decrypt);

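/*
 * Sketch of the caller-facing side (illustrative, not part of this
 * file): encrypting one request synchronously through the entry points
 * above.  key, keylen, sg_src, sg_dst, len and iv are assumed to be set
 * up by the caller.
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_skcipher_setkey(tfm, key, keylen);
 *	if (!err) {
 *		req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *		if (!req) {
 *			err = -ENOMEM;
 *			goto out;
 *		}
 *		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *					      crypto_req_done, &wait);
 *		skcipher_request_set_crypt(req, sg_src, sg_dst, len, iv);
 *		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *		skcipher_request_free(req);
 *	}
 * out:
 *	crypto_free_skcipher(tfm);
 *	return err;
 */
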
static int crypto_lskcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(out, ivs + crypto_skcipher_ivsize(tfm),
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int crypto_lskcipher_import(struct skcipher_request *req,
				   const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	u8 *ivs = skcipher_request_ctx(req);

	ivs = PTR_ALIGN(ivs, crypto_skcipher_alignmask(tfm) + 1);

	memcpy(ivs + crypto_skcipher_ivsize(tfm), in,
	       crypto_skcipher_statesize(tfm));

	return 0;
}

static int skcipher_noexport(struct skcipher_request *req, void *out)
{
	return 0;
}

static int skcipher_noimport(struct skcipher_request *req, const void *in)
{
	return 0;
}

int crypto_skcipher_export(struct skcipher_request *req, void *out)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_export(req, out);
	return alg->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_export);

int crypto_skcipher_import(struct skcipher_request *req, const void *in)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct skcipher_alg *alg = crypto_skcipher_alg(tfm);

	if (alg->co.base.cra_type != &crypto_skcipher_type)
		return crypto_lskcipher_import(req, in);
	return alg->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_skcipher_import);

static void crypto_skcipher_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	alg->exit(skcipher);
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct skcipher_alg *alg = crypto_skcipher_alg(skcipher);

	skcipher_set_needkey(skcipher);

	if (tfm->__crt_alg->cra_type != &crypto_skcipher_type) {
		unsigned am = crypto_skcipher_alignmask(skcipher);
		unsigned reqsize;

		reqsize = am & ~(crypto_tfm_ctx_alignment() - 1);
		reqsize += crypto_skcipher_ivsize(skcipher);
		reqsize += crypto_skcipher_statesize(skcipher);
		crypto_skcipher_set_reqsize(skcipher, reqsize);

		return crypto_init_lskcipher_ops_sg(tfm);
	}

	if (alg->exit)
		skcipher->base.exit = crypto_skcipher_exit_tfm;

	if (alg->init)
		return alg->init(skcipher);

	return 0;
}

static unsigned int crypto_skcipher_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type != &crypto_skcipher_type)
		return sizeof(struct crypto_lskcipher *);

	return crypto_alg_extsize(alg);
}

static void crypto_skcipher_free_instance(struct crypto_instance *inst)
{
	struct skcipher_instance *skcipher =
		container_of(inst, struct skcipher_instance, s.base);

	skcipher->free(skcipher);
}

static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;
static void crypto_skcipher_show(struct seq_file *m, struct crypto_alg *alg)
{
	struct skcipher_alg *skcipher = __crypto_skcipher_alg(alg);

	seq_printf(m, "type         : skcipher\n");
	seq_printf(m, "async        : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ?  "yes" : "no");
"yes" : "no"); 779 seq_printf(m, "blocksize : %u\n", a 887 seq_printf(m, "blocksize : %u\n", alg->cra_blocksize); 780 seq_printf(m, "min keysize : %u\n", s 888 seq_printf(m, "min keysize : %u\n", skcipher->min_keysize); 781 seq_printf(m, "max keysize : %u\n", s 889 seq_printf(m, "max keysize : %u\n", skcipher->max_keysize); 782 seq_printf(m, "ivsize : %u\n", s 890 seq_printf(m, "ivsize : %u\n", skcipher->ivsize); 783 seq_printf(m, "chunksize : %u\n", s 891 seq_printf(m, "chunksize : %u\n", skcipher->chunksize); 784 seq_printf(m, "walksize : %u\n", s 892 seq_printf(m, "walksize : %u\n", skcipher->walksize); 785 seq_printf(m, "statesize : %u\n", s << 786 } 893 } 787 894 788 static int __maybe_unused crypto_skcipher_repo !! 895 #ifdef CONFIG_NET 789 struct sk_buff *skb, struct crypto_alg !! 896 static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) 790 { 897 { 791 struct skcipher_alg *skcipher = __cryp << 792 struct crypto_report_blkcipher rblkcip 898 struct crypto_report_blkcipher rblkcipher; >> 899 struct skcipher_alg *skcipher = container_of(alg, struct skcipher_alg, >> 900 base); 793 901 794 memset(&rblkcipher, 0, sizeof(rblkciph !! 902 strncpy(rblkcipher.type, "skcipher", sizeof(rblkcipher.type)); 795 !! 903 strncpy(rblkcipher.geniv, "<none>", sizeof(rblkcipher.geniv)); 796 strscpy(rblkcipher.type, "skcipher", s << 797 strscpy(rblkcipher.geniv, "<none>", si << 798 904 799 rblkcipher.blocksize = alg->cra_blocks 905 rblkcipher.blocksize = alg->cra_blocksize; 800 rblkcipher.min_keysize = skcipher->min 906 rblkcipher.min_keysize = skcipher->min_keysize; 801 rblkcipher.max_keysize = skcipher->max 907 rblkcipher.max_keysize = skcipher->max_keysize; 802 rblkcipher.ivsize = skcipher->ivsize; 908 rblkcipher.ivsize = skcipher->ivsize; 803 909 804 return nla_put(skb, CRYPTOCFGA_REPORT_ !! 910 if (nla_put(skb, CRYPTOCFGA_REPORT_BLKCIPHER, 805 sizeof(rblkcipher), &rb !! 911 sizeof(struct crypto_report_blkcipher), &rblkcipher)) >> 912 goto nla_put_failure; >> 913 return 0; >> 914 >> 915 nla_put_failure: >> 916 return -EMSGSIZE; >> 917 } >> 918 #else >> 919 static int crypto_skcipher_report(struct sk_buff *skb, struct crypto_alg *alg) >> 920 { >> 921 return -ENOSYS; 806 } 922 } >> 923 #endif 807 924 808 static const struct crypto_type crypto_skciphe !! 925 static const struct crypto_type crypto_skcipher_type2 = { 809 .extsize = crypto_skcipher_extsize, 926 .extsize = crypto_skcipher_extsize, 810 .init_tfm = crypto_skcipher_init_tfm, 927 .init_tfm = crypto_skcipher_init_tfm, 811 .free = crypto_skcipher_free_instance, 928 .free = crypto_skcipher_free_instance, 812 #ifdef CONFIG_PROC_FS 929 #ifdef CONFIG_PROC_FS 813 .show = crypto_skcipher_show, 930 .show = crypto_skcipher_show, 814 #endif 931 #endif 815 #if IS_ENABLED(CONFIG_CRYPTO_USER) << 816 .report = crypto_skcipher_report, 932 .report = crypto_skcipher_report, 817 #endif << 818 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 933 .maskclear = ~CRYPTO_ALG_TYPE_MASK, 819 .maskset = CRYPTO_ALG_TYPE_SKCIPHER_MA !! 934 .maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK, 820 .type = CRYPTO_ALG_TYPE_SKCIPHER, 935 .type = CRYPTO_ALG_TYPE_SKCIPHER, 821 .tfmsize = offsetof(struct crypto_skci 936 .tfmsize = offsetof(struct crypto_skcipher, base), 822 }; 937 }; 823 938 824 int crypto_grab_skcipher(struct crypto_skciphe 939 int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, 825 struct crypto_instanc !! 940 const char *name, u32 type, u32 mask) 826 const char *name, u32 << 827 { 941 { 828 spawn->base.frontend = &crypto_skciphe !! 
struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

struct crypto_sync_skcipher *crypto_alloc_sync_skcipher(
				const char *alg_name, u32 type, u32 mask)
{
	struct crypto_skcipher *tfm;

	/* Only sync algorithms allowed. */
	mask |= CRYPTO_ALG_ASYNC | CRYPTO_ALG_SKCIPHER_REQSIZE_LARGE;

	tfm = crypto_alloc_tfm(alg_name, &crypto_skcipher_type, type, mask);

	/*
	 * Make sure we do not allocate something that might get used with
	 * an on-stack request: check the request size.
	 */
	if (!IS_ERR(tfm) && WARN_ON(crypto_skcipher_reqsize(tfm) >
				    MAX_SYNC_SKCIPHER_REQSIZE)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	return (struct crypto_sync_skcipher *)tfm;
}
EXPORT_SYMBOL_GPL(crypto_alloc_sync_skcipher);

int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_skcipher_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_skcipher);

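/*
 * Sketch (illustrative, not part of this file): the sync variant exists
 * so callers can keep the request on the stack, which is why
 * crypto_alloc_sync_skcipher() bounds the request size above.  key,
 * keylen, sg, len and iv are assumed to be provided by the caller.
 *
 *	struct crypto_sync_skcipher *tfm;
 *	int err;
 *
 *	tfm = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *	err = crypto_sync_skcipher_setkey(tfm, key, keylen);
 *	if (!err) {
 *		SYNC_SKCIPHER_REQUEST_ON_STACK(req, tfm);
 *
 *		skcipher_request_set_sync_tfm(req, tfm);
 *		skcipher_request_set_callback(req, 0, NULL, NULL);
 *		skcipher_request_set_crypt(req, sg, sg, len, iv);
 *		err = crypto_skcipher_encrypt(req);
 *		skcipher_request_zero(req);
 *	}
 *	crypto_free_sync_skcipher(tfm);
 *	return err;
 */
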
int skcipher_prepare_alg_common(struct skcipher_alg_common *alg)
{
	struct crypto_alg *base = &alg->base;

	if (alg->ivsize > PAGE_SIZE / 8 || alg->chunksize > PAGE_SIZE / 8 ||
	    alg->statesize > PAGE_SIZE / 2 ||
	    (alg->ivsize + alg->statesize) > PAGE_SIZE / 2)
		return -EINVAL;

	if (!alg->chunksize)
		alg->chunksize = base->cra_blocksize;

	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;

	return 0;
}

static int skcipher_prepare_alg(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg_common(&alg->co);
	if (err)
		return err;

	if (alg->walksize > PAGE_SIZE / 8)
		return -EINVAL;

	if (!alg->walksize)
		alg->walksize = alg->chunksize;

	if (!alg->statesize) {
		alg->import = skcipher_noimport;
		alg->export = skcipher_noexport;
	} else if (!(alg->import && alg->export))
		return -EINVAL;

	base->cra_type = &crypto_skcipher_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_SKCIPHER;

	return 0;
}

int crypto_register_skcipher(struct skcipher_alg *alg)
{
	struct crypto_alg *base = &alg->base;
	int err;

	err = skcipher_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_skcipher);

void crypto_unregister_skcipher(struct skcipher_alg *alg)
{
	crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skcipher);

int crypto_register_skciphers(struct skcipher_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_skcipher(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_skciphers);

void crypto_unregister_skciphers(struct skcipher_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_skcipher(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_skciphers);

int skcipher_register_instance(struct crypto_template *tmpl,
			       struct skcipher_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = skcipher_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, skcipher_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(skcipher_register_instance);

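/*
 * Sketch of a minimal algorithm registration (illustrative, not part of
 * this file); the my_*() callbacks and MY_* sizes are placeholders for
 * an algorithm's own definitions:
 *
 *	static struct skcipher_alg my_alg = {
 *		.base.cra_name		= "mymode(mycipher)",
 *		.base.cra_driver_name	= "mymode-mycipher-generic",
 *		.base.cra_priority	= 100,
 *		.base.cra_blocksize	= MY_BLOCK_SIZE,
 *		.base.cra_ctxsize	= sizeof(struct my_tfm_ctx),
 *		.base.cra_module	= THIS_MODULE,
 *		.min_keysize		= MY_KEY_SIZE,
 *		.max_keysize		= MY_KEY_SIZE,
 *		.ivsize			= MY_IV_SIZE,
 *		.setkey			= my_setkey,
 *		.encrypt		= my_encrypt,
 *		.decrypt		= my_decrypt,
 *	};
 *
 * registered with crypto_register_skcipher(&my_alg) from module init and
 * torn down with crypto_unregister_skcipher(&my_alg) on module exit.
 */
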
static int skcipher_setkey_simple(struct crypto_skcipher *tfm, const u8 *key,
				  unsigned int keylen)
{
	struct crypto_cipher *cipher = skcipher_cipher_simple(tfm);

	crypto_cipher_clear_flags(cipher, CRYPTO_TFM_REQ_MASK);
	crypto_cipher_set_flags(cipher, crypto_skcipher_get_flags(tfm) &
				CRYPTO_TFM_REQ_MASK);
	return crypto_cipher_setkey(cipher, key, keylen);
}

static int skcipher_init_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_instance *inst = skcipher_alg_instance(tfm);
	struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);
	struct crypto_cipher *cipher;

	cipher = crypto_spawn_cipher(spawn);
	if (IS_ERR(cipher))
		return PTR_ERR(cipher);

	ctx->cipher = cipher;
	return 0;
}

static void skcipher_exit_tfm_simple(struct crypto_skcipher *tfm)
{
	struct skcipher_ctx_simple *ctx = crypto_skcipher_ctx(tfm);

	crypto_free_cipher(ctx->cipher);
}

static void skcipher_free_instance_simple(struct skcipher_instance *inst)
{
	crypto_drop_cipher(skcipher_instance_ctx(inst));
	kfree(inst);
}

/**
 * skcipher_alloc_instance_simple - allocate instance of simple block cipher
 * mode
 *
 * Allocate an skcipher_instance for a simple length-preserving cipher mode,
 * e.g. cbc or ecb.  The instance context will have just a single crypto_spawn,
 * that for the underlying cipher.  The {min,max}_keysize, ivsize, blocksize,
 * alignmask, and priority are set from the underlying cipher but can be
 * overridden if needed.  The tfm context defaults to skcipher_ctx_simple, and
 * default ->setkey(), ->init(), and ->exit() methods are installed.
 *
 * @tmpl: the template being instantiated
 * @tb: the template parameters
 *
 * Return: a pointer to the new instance, or an ERR_PTR().  The caller still
 *	   needs to register the instance.
 */
struct skcipher_instance *skcipher_alloc_instance_simple(
	struct crypto_template *tmpl, struct rtattr **tb)
{
	u32 mask;
	struct skcipher_instance *inst;
	struct crypto_cipher_spawn *spawn;
	struct crypto_alg *cipher_alg;
	int err;

	err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SKCIPHER, &mask);
	if (err)
		return ERR_PTR(err);

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return ERR_PTR(-ENOMEM);
	spawn = skcipher_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, skcipher_crypto_instance(inst),
				 crypto_attr_alg_name(tb[1]), 0, mask);
	if (err)
		goto err_free_inst;
	cipher_alg = crypto_spawn_cipher_alg(spawn);

	err = crypto_inst_setname(skcipher_crypto_instance(inst), tmpl->name,
				  cipher_alg);
	if (err)
		goto err_free_inst;

	inst->free = skcipher_free_instance_simple;

	/* Default algorithm properties, can be overridden */
	inst->alg.base.cra_blocksize = cipher_alg->cra_blocksize;
	inst->alg.base.cra_alignmask = cipher_alg->cra_alignmask;
	inst->alg.base.cra_priority = cipher_alg->cra_priority;
	inst->alg.min_keysize = cipher_alg->cra_cipher.cia_min_keysize;
	inst->alg.max_keysize = cipher_alg->cra_cipher.cia_max_keysize;
	inst->alg.ivsize = cipher_alg->cra_blocksize;

	/* Use skcipher_ctx_simple by default, can be overridden */
	inst->alg.base.cra_ctxsize = sizeof(struct skcipher_ctx_simple);
	inst->alg.setkey = skcipher_setkey_simple;
	inst->alg.init = skcipher_init_tfm_simple;
	inst->alg.exit = skcipher_exit_tfm_simple;

	return inst;

err_free_inst:
	skcipher_free_instance_simple(inst);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(skcipher_alloc_instance_simple);

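/*
 * Sketch of how a template would consume the helper above (illustrative,
 * modelled on crypto/ecb.c); the crypto_ecb_*() names are placeholders
 * for the template's own callbacks:
 *
 *	static int crypto_ecb_create(struct crypto_template *tmpl,
 *				     struct rtattr **tb)
 *	{
 *		struct skcipher_instance *inst;
 *		int err;
 *
 *		inst = skcipher_alloc_instance_simple(tmpl, tb);
 *		if (IS_ERR(inst))
 *			return PTR_ERR(inst);
 *
 *		inst->alg.ivsize = 0;	(ECB takes no IV)
 *		inst->alg.encrypt = crypto_ecb_encrypt;
 *		inst->alg.decrypt = crypto_ecb_decrypt;
 *
 *		err = skcipher_register_instance(tmpl, inst);
 *		if (err)
 *			inst->free(inst);
 *		return err;
 *	}
 */
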
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);