TOMOYO Linux Cross Reference
Linux/crypto/scompress.c

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Synchronous Compression operations
 *
 * Copyright 2015 LG Electronics Inc.
 * Copyright (c) 2016, Intel Corporation
 * Author: Giovanni Cabiddu <giovanni.cabiddu@intel.com>
 */

#include <crypto/internal/acompress.h>
#include <crypto/internal/scompress.h>
#include <crypto/scatterwalk.h>
#include <linux/cryptouser.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>

#include "compress.h"

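/*
 * Each possible CPU gets a pair of scratch buffers, used to linearise
 * scatterlist input/output that the synchronous scomp backends cannot
 * consume directly.  The buffers are vmalloc'ed lazily when the first
 * scomp tfm is created and freed again when the last one goes away.
 */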
struct scomp_scratch {
        spinlock_t      lock;
        void            *src;
        void            *dst;
};

static DEFINE_PER_CPU(struct scomp_scratch, scomp_scratch) = {
        .lock = __SPIN_LOCK_UNLOCKED(scomp_scratch.lock),
};

static const struct crypto_type crypto_scomp_type;
/* Number of live scomp tfms; the scratch buffers exist while this is > 0. */
static int scomp_scratch_users;
static DEFINE_MUTEX(scomp_lock);

static int __maybe_unused crypto_scomp_report(
        struct sk_buff *skb, struct crypto_alg *alg)
{
        struct crypto_report_comp rscomp;

        memset(&rscomp, 0, sizeof(rscomp));

        strscpy(rscomp.type, "scomp", sizeof(rscomp.type));

        return nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
                       sizeof(rscomp), &rscomp);
}

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
        __maybe_unused;

static void crypto_scomp_show(struct seq_file *m, struct crypto_alg *alg)
{
        seq_puts(m, "type         : scomp\n");
}

static void crypto_scomp_free_scratches(void)
{
        struct scomp_scratch *scratch;
        int i;

        for_each_possible_cpu(i) {
                scratch = per_cpu_ptr(&scomp_scratch, i);

                vfree(scratch->src);
                vfree(scratch->dst);
                scratch->src = NULL;
                scratch->dst = NULL;
        }
}

static int crypto_scomp_alloc_scratches(void)
{
        struct scomp_scratch *scratch;
        int i;

        for_each_possible_cpu(i) {
                void *mem;

                scratch = per_cpu_ptr(&scomp_scratch, i);

                mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
                if (!mem)
                        goto error;
                scratch->src = mem;
                mem = vmalloc_node(SCOMP_SCRATCH_SIZE, cpu_to_node(i));
                if (!mem)
                        goto error;
                scratch->dst = mem;
        }
        return 0;
error:
        crypto_scomp_free_scratches();
        return -ENOMEM;
}

static int crypto_scomp_init_tfm(struct crypto_tfm *tfm)
{
        int ret = 0;

        mutex_lock(&scomp_lock);
        if (!scomp_scratch_users++)
                ret = crypto_scomp_alloc_scratches();
        mutex_unlock(&scomp_lock);

        return ret;
}

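/*
 * Bridge an asynchronous acomp request to the synchronous scomp
 * implementation: linearise src/dst (using the per-CPU scratch buffers
 * where needed), invoke the scomp backend, then copy the result back
 * out.  dir != 0 means compress, dir == 0 means decompress.
 */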
static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
{
        struct crypto_acomp *tfm = crypto_acomp_reqtfm(req);
        void **tfm_ctx = acomp_tfm_ctx(tfm);
        struct crypto_scomp *scomp = *tfm_ctx;
        void **ctx = acomp_request_ctx(req);
        struct scomp_scratch *scratch;
        void *src, *dst;
        unsigned int dlen;
        int ret;

        if (!req->src || !req->slen || req->slen > SCOMP_SCRATCH_SIZE)
                return -EINVAL;

        if (req->dst && !req->dlen)
                return -EINVAL;

        /* A zero or oversized dlen means "up to one full scratch buffer". */
        if (!req->dlen || req->dlen > SCOMP_SCRATCH_SIZE)
                req->dlen = SCOMP_SCRATCH_SIZE;

        dlen = req->dlen;

        scratch = raw_cpu_ptr(&scomp_scratch);
        spin_lock(&scratch->lock);

        /*
         * A single lowmem source segment can be used in place; anything
         * else is linearised into the per-CPU source scratch buffer.
         */
        if (sg_nents(req->src) == 1 && !PageHighMem(sg_page(req->src))) {
                src = page_to_virt(sg_page(req->src)) + req->src->offset;
        } else {
                scatterwalk_map_and_copy(scratch->src, req->src, 0,
                                         req->slen, 0);
                src = scratch->src;
        }

        if (req->dst && sg_nents(req->dst) == 1 && !PageHighMem(sg_page(req->dst)))
                dst = page_to_virt(sg_page(req->dst)) + req->dst->offset;
        else
                dst = scratch->dst;

        if (dir)
                ret = crypto_scomp_compress(scomp, src, req->slen,
                                            dst, &req->dlen, *ctx);
        else
                ret = crypto_scomp_decompress(scomp, src, req->slen,
                                              dst, &req->dlen, *ctx);
        if (!ret) {
                if (!req->dst) {
                        /* No destination given: allocate one sized to the result. */
                        req->dst = sgl_alloc(req->dlen, GFP_ATOMIC, NULL);
                        if (!req->dst) {
                                ret = -ENOMEM;
                                goto out;
                        }
                } else if (req->dlen > dlen) {
                        /* The result does not fit in the caller's buffer. */
                        ret = -ENOSPC;
                        goto out;
                }
                if (dst == scratch->dst) {
                        scatterwalk_map_and_copy(scratch->dst, req->dst, 0,
                                                 req->dlen, 1);
                } else {
                        /* Wrote directly to the caller's pages; keep dcache coherent. */
                        int nr_pages = DIV_ROUND_UP(req->dst->offset + req->dlen, PAGE_SIZE);
                        int i;
                        struct page *dst_page = sg_page(req->dst);

                        for (i = 0; i < nr_pages; i++)
                                flush_dcache_page(dst_page + i);
                }
        }
out:
        spin_unlock(&scratch->lock);
        return ret;
}

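/*
 * Illustrative sketch (not part of this file): one way a caller might
 * exercise the path above through the public acomp API.  "deflate" is
 * just an example algorithm name backed by an scomp implementation, and
 * example_compress() is a hypothetical helper; error handling is kept
 * minimal.
 */
#if 0
static int example_compress(struct scatterlist *src, unsigned int slen,
                            struct scatterlist *dst, unsigned int dlen)
{
        struct crypto_acomp *tfm;
        struct acomp_req *req;
        DECLARE_CRYPTO_WAIT(wait);
        int ret;

        tfm = crypto_alloc_acomp("deflate", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = acomp_request_alloc(tfm);
        if (!req) {
                crypto_free_acomp(tfm);
                return -ENOMEM;
        }

        acomp_request_set_params(req, src, dst, slen, dlen);
        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);

        /* For an scomp-backed tfm this completes synchronously. */
        ret = crypto_wait_req(crypto_acomp_compress(req), &wait);

        acomp_request_free(req);
        crypto_free_acomp(tfm);
        return ret;
}
#endif
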
static int scomp_acomp_compress(struct acomp_req *req)
{
        return scomp_acomp_comp_decomp(req, 1);
}

static int scomp_acomp_decompress(struct acomp_req *req)
{
        return scomp_acomp_comp_decomp(req, 0);
}

static void crypto_exit_scomp_ops_async(struct crypto_tfm *tfm)
{
        struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);

        crypto_free_scomp(*ctx);

        mutex_lock(&scomp_lock);
        if (!--scomp_scratch_users)
                crypto_scomp_free_scratches();
        mutex_unlock(&scomp_lock);
}

int crypto_init_scomp_ops_async(struct crypto_tfm *tfm)
{
        struct crypto_alg *calg = tfm->__crt_alg;
        struct crypto_acomp *crt = __crypto_acomp_tfm(tfm);
        struct crypto_scomp **ctx = crypto_tfm_ctx(tfm);
        struct crypto_scomp *scomp;

        if (!crypto_mod_get(calg))
                return -EAGAIN;

        scomp = crypto_create_tfm(calg, &crypto_scomp_type);
        if (IS_ERR(scomp)) {
                crypto_mod_put(calg);
                return PTR_ERR(scomp);
        }

        *ctx = scomp;
        tfm->exit = crypto_exit_scomp_ops_async;

        crt->compress = scomp_acomp_compress;
        crt->decompress = scomp_acomp_decompress;
        crt->dst_free = sgl_free;
        crt->reqsize = sizeof(void *);

        return 0;
}

struct acomp_req *crypto_acomp_scomp_alloc_ctx(struct acomp_req *req)
{
        struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
        struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
        struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
        struct crypto_scomp *scomp = *tfm_ctx;
        void *ctx;

        ctx = crypto_scomp_alloc_ctx(scomp);
        if (IS_ERR(ctx)) {
                kfree(req);
                return NULL;
        }

        *req->__ctx = ctx;

        return req;
}

void crypto_acomp_scomp_free_ctx(struct acomp_req *req)
{
        struct crypto_acomp *acomp = crypto_acomp_reqtfm(req);
        struct crypto_tfm *tfm = crypto_acomp_tfm(acomp);
        struct crypto_scomp **tfm_ctx = crypto_tfm_ctx(tfm);
        struct crypto_scomp *scomp = *tfm_ctx;
        void *ctx = *req->__ctx;

        if (ctx)
                crypto_scomp_free_ctx(scomp, ctx);
}

static const struct crypto_type crypto_scomp_type = {
        .extsize = crypto_alg_extsize,
        .init_tfm = crypto_scomp_init_tfm,
#ifdef CONFIG_PROC_FS
        .show = crypto_scomp_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
        .report = crypto_scomp_report,
#endif
        .maskclear = ~CRYPTO_ALG_TYPE_MASK,
        .maskset = CRYPTO_ALG_TYPE_MASK,
        .type = CRYPTO_ALG_TYPE_SCOMPRESS,
        .tfmsize = offsetof(struct crypto_scomp, base),
};

int crypto_register_scomp(struct scomp_alg *alg)
{
        struct crypto_alg *base = &alg->calg.base;

        comp_prepare_alg(&alg->calg);

        base->cra_type = &crypto_scomp_type;
        base->cra_flags |= CRYPTO_ALG_TYPE_SCOMPRESS;

        return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_scomp);

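/*
 * Illustrative sketch (not part of this file): the shape of a driver-side
 * scomp_alg that would be passed to crypto_register_scomp().  All my_*
 * names and "myalg" are hypothetical placeholders; see e.g. crypto/lzo.c
 * for a real user.
 */
#if 0
static void *my_alloc_ctx(struct crypto_scomp *tfm)
{
        return NULL;    /* per-request working state, if any */
}

static void my_free_ctx(struct crypto_scomp *tfm, void *ctx)
{
}

static int my_compress(struct crypto_scomp *tfm, const u8 *src,
                       unsigned int slen, u8 *dst, unsigned int *dlen,
                       void *ctx)
{
        return -EOPNOTSUPP;     /* real drivers compress src into dst */
}

static int my_decompress(struct crypto_scomp *tfm, const u8 *src,
                         unsigned int slen, u8 *dst, unsigned int *dlen,
                         void *ctx)
{
        return -EOPNOTSUPP;
}

static struct scomp_alg my_alg = {
        .alloc_ctx      = my_alloc_ctx,
        .free_ctx       = my_free_ctx,
        .compress       = my_compress,
        .decompress     = my_decompress,
        .base           = {
                .cra_name        = "myalg",
                .cra_driver_name = "myalg-generic",
                .cra_module      = THIS_MODULE,
        }
};
#endif
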
void crypto_unregister_scomp(struct scomp_alg *alg)
{
        crypto_unregister_alg(&alg->base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomp);

int crypto_register_scomps(struct scomp_alg *algs, int count)
{
        int i, ret;

        for (i = 0; i < count; i++) {
                ret = crypto_register_scomp(&algs[i]);
                if (ret)
                        goto err;
        }

        return 0;

err:
        for (--i; i >= 0; --i)
                crypto_unregister_scomp(&algs[i]);

        return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_scomps);

void crypto_unregister_scomps(struct scomp_alg *algs, int count)
{
        int i;

        for (i = count - 1; i >= 0; --i)
                crypto_unregister_scomp(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_scomps);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Synchronous compression type");

