Diffstat (limited to 'include/crypto')
-rw-r--r-- | include/crypto/acompress.h | 382
-rw-r--r-- | include/crypto/algapi.h | 33
-rw-r--r-- | include/crypto/authenc.h | 2
-rw-r--r-- | include/crypto/chacha.h | 11
-rw-r--r-- | include/crypto/ctr.h | 2
-rw-r--r-- | include/crypto/hash.h | 72
-rw-r--r-- | include/crypto/hkdf.h | 20
-rw-r--r-- | include/crypto/internal/acompress.h | 85
-rw-r--r-- | include/crypto/internal/hash.h | 15
-rw-r--r-- | include/crypto/internal/scompress.h | 18
-rw-r--r-- | include/crypto/internal/skcipher.h | 40
-rw-r--r-- | include/crypto/krb5.h | 160
-rw-r--r-- | include/crypto/scatterwalk.h | 222
-rw-r--r-- | include/crypto/sig.h | 5
-rw-r--r-- | include/crypto/skcipher.h | 19
15 files changed, 931 insertions, 155 deletions
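The bulk of this diff reworks the acomp request API in acompress.h below: the CRYPTO_ACOMP_ALLOC_OUTPUT flag is removed, and a request's source and destination may now each be given as a scatterlist, a DMA-capable virtual address, a non-DMA virtual address, or a folio. What follows is a minimal sketch of a synchronous compress call through the new virtual-address setters; the helper name and the choice of "deflate" are illustrative only, and the buffers are assumed to be kmalloc()-backed (hence usable for DMA):

#include <crypto/acompress.h>
#include <linux/printk.h>

static int example_compress(const u8 *src, unsigned int slen,
			    u8 *dst, unsigned int dlen)
{
	struct crypto_acomp *tfm;
	struct acomp_req *req;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_acomp("deflate", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* The gfp argument is now optional and defaults to GFP_KERNEL. */
	req = acomp_request_alloc(tfm);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	/* Plain linear buffers instead of scatterlists. */
	acomp_request_set_src_dma(req, src, slen);
	acomp_request_set_dst_dma(req, dst, dlen);
	acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);

	err = crypto_wait_req(crypto_acomp_compress(req), &wait);
	if (!err)
		pr_info("compressed %u -> %u bytes\n", slen, req->dlen);

	acomp_request_free(req);
out_tfm:
	crypto_free_acomp(tfm);
	return err;
}
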
diff --git a/include/crypto/acompress.h b/include/crypto/acompress.h index 54937b615239..c497c73baf13 100644 --- a/include/crypto/acompress.h +++ b/include/crypto/acompress.h @@ -10,30 +10,105 @@ #define _CRYPTO_ACOMP_H #include <linux/atomic.h> +#include <linux/args.h> +#include <linux/compiler_types.h> #include <linux/container_of.h> #include <linux/crypto.h> +#include <linux/err.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/spinlock_types.h> +#include <linux/types.h> + +/* Set this bit if source is virtual address instead of SG list. */ +#define CRYPTO_ACOMP_REQ_SRC_VIRT 0x00000002 + +/* Set this bit if virtual address source cannot be used for DMA. */ +#define CRYPTO_ACOMP_REQ_SRC_NONDMA 0x00000004 + +/* Set this bit if destination is virtual address instead of SG list. */ +#define CRYPTO_ACOMP_REQ_DST_VIRT 0x00000008 + +/* Set this bit if virtual address destination cannot be used for DMA. */ +#define CRYPTO_ACOMP_REQ_DST_NONDMA 0x00000010 + +/* Set this bit if source is a folio. */ +#define CRYPTO_ACOMP_REQ_SRC_FOLIO 0x00000020 + +/* Set this bit if destination is a folio. */ +#define CRYPTO_ACOMP_REQ_DST_FOLIO 0x00000040 -#define CRYPTO_ACOMP_ALLOC_OUTPUT 0x00000001 #define CRYPTO_ACOMP_DST_MAX 131072 +#define MAX_SYNC_COMP_REQSIZE 0 + +#define ACOMP_REQUEST_ALLOC(name, tfm, gfp) \ + char __##name##_req[sizeof(struct acomp_req) + \ + MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \ + struct acomp_req *name = acomp_request_on_stack_init( \ + __##name##_req, (tfm), (gfp), false) + +struct acomp_req; +struct folio; + +struct acomp_req_chain { + struct list_head head; + struct acomp_req *req0; + struct acomp_req *cur; + int (*op)(struct acomp_req *req); + crypto_completion_t compl; + void *data; + struct scatterlist ssg; + struct scatterlist dsg; + union { + const u8 *src; + struct folio *sfolio; + }; + union { + u8 *dst; + struct folio *dfolio; + }; + size_t soff; + size_t doff; + u32 flags; +}; + /** * struct acomp_req - asynchronous (de)compression request * * @base: Common attributes for asynchronous crypto requests - * @src: Source Data - * @dst: Destination data + * @src: Source scatterlist + * @dst: Destination scatterlist + * @svirt: Source virtual address + * @dvirt: Destination virtual address + * @sfolio: Source folio + * @soff: Source folio offset + * @dfolio: Destination folio + * @doff: Destination folio offset * @slen: Size of the input buffer * @dlen: Size of the output buffer and number of bytes produced - * @flags: Internal flags + * @chain: Private API code data, do not use * @__ctx: Start of private context data */ struct acomp_req { struct crypto_async_request base; - struct scatterlist *src; - struct scatterlist *dst; + union { + struct scatterlist *src; + const u8 *svirt; + struct folio *sfolio; + }; + union { + struct scatterlist *dst; + u8 *dvirt; + struct folio *dfolio; + }; + size_t soff; + size_t doff; unsigned int slen; unsigned int dlen; - u32 flags; + + struct acomp_req_chain chain; + void *__ctx[] CRYPTO_MINALIGN_ATTR; }; @@ -43,21 +118,26 @@ struct acomp_req { * * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation - * @dst_free: Frees destination buffer if allocated inside the - * algorithm * @reqsize: Context size for (de)compression requests + * @fb: Synchronous fallback tfm * @base: Common crypto API algorithm data structure */ struct crypto_acomp { int (*compress)(struct acomp_req *req); int (*decompress)(struct acomp_req *req); - void (*dst_free)(struct 
scatterlist *dst); unsigned int reqsize; + struct crypto_acomp *fb; struct crypto_tfm base; }; +struct crypto_acomp_stream { + spinlock_t lock; + void *ctx; +}; + #define COMP_ALG_COMMON { \ struct crypto_alg base; \ + struct crypto_acomp_stream __percpu *stream; \ } struct comp_alg_common COMP_ALG_COMMON; @@ -168,14 +248,67 @@ static inline int crypto_has_acomp(const char *alg_name, u32 type, u32 mask) return crypto_has_alg(alg_name, type, mask); } +static inline const char *crypto_acomp_alg_name(struct crypto_acomp *tfm) +{ + return crypto_tfm_alg_name(crypto_acomp_tfm(tfm)); +} + +static inline const char *crypto_acomp_driver_name(struct crypto_acomp *tfm) +{ + return crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)); +} + /** * acomp_request_alloc() -- allocates asynchronous (de)compression request * * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * @gfp: gfp to pass to kzalloc (defaults to GFP_KERNEL) * * Return: allocated handle in case of success or NULL in case of an error */ -struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm); +static inline struct acomp_req *acomp_request_alloc_extra_noprof( + struct crypto_acomp *tfm, size_t extra, gfp_t gfp) +{ + struct acomp_req *req; + size_t len; + + len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN); + if (check_add_overflow(len, extra, &len)) + return NULL; + + req = kzalloc_noprof(len, gfp); + if (likely(req)) + acomp_request_set_tfm(req, tfm); + return req; +} +#define acomp_request_alloc_noprof(tfm, ...) \ + CONCATENATE(acomp_request_alloc_noprof_, COUNT_ARGS(__VA_ARGS__))( \ + tfm, ##__VA_ARGS__) +#define acomp_request_alloc_noprof_0(tfm) \ + acomp_request_alloc_noprof_1(tfm, GFP_KERNEL) +#define acomp_request_alloc_noprof_1(tfm, gfp) \ + acomp_request_alloc_extra_noprof(tfm, 0, gfp) +#define acomp_request_alloc(...) alloc_hooks(acomp_request_alloc_noprof(__VA_ARGS__)) + +/** + * acomp_request_alloc_extra() -- allocate acomp request with extra memory + * + * @tfm: ACOMPRESS tfm handle allocated with crypto_alloc_acomp() + * @extra: amount of extra memory + * @gfp: gfp to pass to kzalloc + * + * Return: allocated handle in case of success or NULL in case of an error + */ +#define acomp_request_alloc_extra(...) 
alloc_hooks(acomp_request_alloc_extra_noprof(__VA_ARGS__)) + +static inline void *acomp_request_extra(struct acomp_req *req) +{ + struct crypto_acomp *tfm = crypto_acomp_reqtfm(req); + size_t len; + + len = ALIGN(sizeof(*req) + crypto_acomp_reqsize(tfm), CRYPTO_MINALIGN); + return (void *)((char *)req + len); +} /** * acomp_request_free() -- zeroize and free asynchronous (de)compression @@ -184,7 +317,12 @@ struct acomp_req *acomp_request_alloc(struct crypto_acomp *tfm); * * @req: request to free */ -void acomp_request_free(struct acomp_req *req); +static inline void acomp_request_free(struct acomp_req *req) +{ + if (!req || (req->base.flags & CRYPTO_TFM_REQ_ON_STACK)) + return; + kfree_sensitive(req); +} /** * acomp_request_set_callback() -- Sets an asynchronous callback @@ -202,10 +340,17 @@ static inline void acomp_request_set_callback(struct acomp_req *req, crypto_completion_t cmpl, void *data) { + u32 keep = CRYPTO_ACOMP_REQ_SRC_VIRT | CRYPTO_ACOMP_REQ_SRC_NONDMA | + CRYPTO_ACOMP_REQ_DST_VIRT | CRYPTO_ACOMP_REQ_DST_NONDMA | + CRYPTO_ACOMP_REQ_SRC_FOLIO | CRYPTO_ACOMP_REQ_DST_FOLIO | + CRYPTO_TFM_REQ_ON_STACK; + req->base.complete = cmpl; req->base.data = data; - req->base.flags &= CRYPTO_ACOMP_ALLOC_OUTPUT; - req->base.flags |= flgs & ~CRYPTO_ACOMP_ALLOC_OUTPUT; + req->base.flags &= keep; + req->base.flags |= flgs & ~keep; + + crypto_reqchain_init(&req->base); } /** @@ -232,9 +377,191 @@ static inline void acomp_request_set_params(struct acomp_req *req, req->slen = slen; req->dlen = dlen; - req->flags &= ~CRYPTO_ACOMP_ALLOC_OUTPUT; - if (!req->dst) - req->flags |= CRYPTO_ACOMP_ALLOC_OUTPUT; + req->base.flags &= ~(CRYPTO_ACOMP_REQ_SRC_VIRT | + CRYPTO_ACOMP_REQ_SRC_NONDMA | + CRYPTO_ACOMP_REQ_SRC_FOLIO | + CRYPTO_ACOMP_REQ_DST_FOLIO | + CRYPTO_ACOMP_REQ_DST_VIRT | + CRYPTO_ACOMP_REQ_DST_NONDMA); +} + +/** + * acomp_request_set_src_sg() -- Sets source scatterlist + * + * Sets source scatterlist required by an acomp operation. + * + * @req: asynchronous compress request + * @src: pointer to input buffer scatterlist + * @slen: size of the input buffer + */ +static inline void acomp_request_set_src_sg(struct acomp_req *req, + struct scatterlist *src, + unsigned int slen) +{ + req->src = src; + req->slen = slen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA; + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT; + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO; +} + +/** + * acomp_request_set_src_dma() -- Sets DMA source virtual address + * + * Sets source virtual address required by an acomp operation. + * The address must be usable for DMA. + * + * @req: asynchronous compress request + * @src: virtual address pointer to input buffer + * @slen: size of the input buffer + */ +static inline void acomp_request_set_src_dma(struct acomp_req *req, + const u8 *src, unsigned int slen) +{ + req->svirt = src; + req->slen = slen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA; + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO; + req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT; +} + +/** + * acomp_request_set_src_nondma() -- Sets non-DMA source virtual address + * + * Sets source virtual address required by an acomp operation. + * The address can not be used for DMA. 
+ * + * @req: asynchronous compress request + * @src: virtual address pointer to input buffer + * @slen: size of the input buffer + */ +static inline void acomp_request_set_src_nondma(struct acomp_req *req, + const u8 *src, + unsigned int slen) +{ + req->svirt = src; + req->slen = slen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_FOLIO; + req->base.flags |= CRYPTO_ACOMP_REQ_SRC_NONDMA; + req->base.flags |= CRYPTO_ACOMP_REQ_SRC_VIRT; +} + +/** + * acomp_request_set_src_folio() -- Sets source folio + * + * Sets source folio required by an acomp operation. + * + * @req: asynchronous compress request + * @folio: pointer to input folio + * @off: input folio offset + * @len: size of the input buffer + */ +static inline void acomp_request_set_src_folio(struct acomp_req *req, + struct folio *folio, size_t off, + unsigned int len) +{ + req->sfolio = folio; + req->soff = off; + req->slen = len; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_NONDMA; + req->base.flags &= ~CRYPTO_ACOMP_REQ_SRC_VIRT; + req->base.flags |= CRYPTO_ACOMP_REQ_SRC_FOLIO; +} + +/** + * acomp_request_set_dst_sg() -- Sets destination scatterlist + * + * Sets destination scatterlist required by an acomp operation. + * + * @req: asynchronous compress request + * @dst: pointer to output buffer scatterlist + * @dlen: size of the output buffer + */ +static inline void acomp_request_set_dst_sg(struct acomp_req *req, + struct scatterlist *dst, + unsigned int dlen) +{ + req->dst = dst; + req->dlen = dlen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA; + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT; + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO; +} + +/** + * acomp_request_set_dst_dma() -- Sets DMA destination virtual address + * + * Sets destination virtual address required by an acomp operation. + * The address must be usable for DMA. + * + * @req: asynchronous compress request + * @dst: virtual address pointer to output buffer + * @dlen: size of the output buffer + */ +static inline void acomp_request_set_dst_dma(struct acomp_req *req, + u8 *dst, unsigned int dlen) +{ + req->dvirt = dst; + req->dlen = dlen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA; + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO; + req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT; +} + +/** + * acomp_request_set_dst_nondma() -- Sets non-DMA destination virtual address + * + * Sets destination virtual address required by an acomp operation. + * The address can not be used for DMA. + * + * @req: asynchronous compress request + * @dst: virtual address pointer to output buffer + * @dlen: size of the output buffer + */ +static inline void acomp_request_set_dst_nondma(struct acomp_req *req, + u8 *dst, unsigned int dlen) +{ + req->dvirt = dst; + req->dlen = dlen; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_FOLIO; + req->base.flags |= CRYPTO_ACOMP_REQ_DST_NONDMA; + req->base.flags |= CRYPTO_ACOMP_REQ_DST_VIRT; +} + +/** + * acomp_request_set_dst_folio() -- Sets destination folio + * + * Sets destination folio required by an acomp operation. 
+ * + * @req: asynchronous compress request + * @folio: pointer to output folio + * @off: output folio offset + * @len: size of the output buffer + */ +static inline void acomp_request_set_dst_folio(struct acomp_req *req, + struct folio *folio, size_t off, + unsigned int len) +{ + req->dfolio = folio; + req->doff = off; + req->dlen = len; + + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_NONDMA; + req->base.flags &= ~CRYPTO_ACOMP_REQ_DST_VIRT; + req->base.flags |= CRYPTO_ACOMP_REQ_DST_FOLIO; +} + +static inline void acomp_request_chain(struct acomp_req *req, + struct acomp_req *head) +{ + crypto_request_chain(&req->base, &head->base); } /** @@ -246,10 +573,7 @@ static inline void acomp_request_set_params(struct acomp_req *req, * * Return: zero on success; error code in case of error */ -static inline int crypto_acomp_compress(struct acomp_req *req) -{ - return crypto_acomp_reqtfm(req)->compress(req); -} +int crypto_acomp_compress(struct acomp_req *req); /** * crypto_acomp_decompress() -- Invoke asynchronous decompress operation @@ -260,9 +584,21 @@ static inline int crypto_acomp_compress(struct acomp_req *req) * * Return: zero on success; error code in case of error */ -static inline int crypto_acomp_decompress(struct acomp_req *req) +int crypto_acomp_decompress(struct acomp_req *req); + +static inline struct acomp_req *acomp_request_on_stack_init( + char *buf, struct crypto_acomp *tfm, gfp_t gfp, bool stackonly) { - return crypto_acomp_reqtfm(req)->decompress(req); + struct acomp_req *req; + + if (!stackonly && (req = acomp_request_alloc(tfm, gfp))) + return req; + + req = (void *)buf; + acomp_request_set_tfm(req, tfm->fb); + req->base.flags = CRYPTO_TFM_REQ_ON_STACK; + + return req; } #endif diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 156de41ca760..6e07bbc04089 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -11,6 +11,7 @@ #include <linux/align.h> #include <linux/cache.h> #include <linux/crypto.h> +#include <linux/list.h> #include <linux/types.h> #include <linux/workqueue.h> @@ -53,20 +54,7 @@ struct rtattr; struct scatterlist; struct seq_file; struct sk_buff; - -struct crypto_type { - unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask); - unsigned int (*extsize)(struct crypto_alg *alg); - int (*init_tfm)(struct crypto_tfm *tfm); - void (*show)(struct seq_file *m, struct crypto_alg *alg); - int (*report)(struct sk_buff *skb, struct crypto_alg *alg); - void (*free)(struct crypto_instance *inst); - - unsigned int type; - unsigned int maskclear; - unsigned int maskset; - unsigned int tfmsize; -}; +union crypto_no_such_thing; struct crypto_instance { struct crypto_alg alg; @@ -119,6 +107,13 @@ struct crypto_queue { }; struct scatter_walk { + /* Must be the first member, see struct skcipher_walk. */ + union { + void *const addr; + + /* Private API field, do not touch. 
*/ + union crypto_no_such_thing *__addr; + }; struct scatterlist *sg; unsigned int offset; }; @@ -271,4 +266,14 @@ static inline u32 crypto_tfm_alg_type(struct crypto_tfm *tfm) return tfm->__crt_alg->cra_flags & CRYPTO_ALG_TYPE_MASK; } +static inline bool crypto_request_chained(struct crypto_async_request *req) +{ + return !list_empty(&req->list); +} + +static inline bool crypto_tfm_req_chain(struct crypto_tfm *tfm) +{ + return tfm->__crt_alg->cra_flags & CRYPTO_ALG_REQ_CHAIN; +} + #endif /* _CRYPTO_ALGAPI_H */ diff --git a/include/crypto/authenc.h b/include/crypto/authenc.h index 5f92a986083c..15a9caa2354a 100644 --- a/include/crypto/authenc.h +++ b/include/crypto/authenc.h @@ -28,5 +28,7 @@ struct crypto_authenc_keys { int crypto_authenc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, unsigned int keylen); +int crypto_krb5enc_extractkeys(struct crypto_authenc_keys *keys, const u8 *key, + unsigned int keylen); #endif /* _CRYPTO_AUTHENC_H */ diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h index 5bae6a55b333..f8cc073bba41 100644 --- a/include/crypto/chacha.h +++ b/include/crypto/chacha.h @@ -62,8 +62,7 @@ static inline void chacha_init_consts(u32 *state) state[3] = CHACHA_CONSTANT_TE_K; } -void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv); -static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv) +static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv) { chacha_init_consts(state); state[4] = key[0]; @@ -80,14 +79,6 @@ static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv) state[15] = get_unaligned_le32(iv + 12); } -static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv) -{ - if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA)) - chacha_init_arch(state, key, iv); - else - chacha_init_generic(state, key, iv); -} - void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes, int nrounds); void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src, diff --git a/include/crypto/ctr.h b/include/crypto/ctr.h index a1c66d1001af..da1ee73e9ce9 100644 --- a/include/crypto/ctr.h +++ b/include/crypto/ctr.h @@ -34,8 +34,8 @@ static inline int crypto_ctr_encrypt_walk(struct skcipher_request *req, err = skcipher_walk_virt(&walk, req, false); while (walk.nbytes > 0) { + const u8 *src = walk.src.virt.addr; u8 *dst = walk.dst.virt.addr; - u8 *src = walk.src.virt.addr; int nbytes = walk.nbytes; int tail = 0; diff --git a/include/crypto/hash.h b/include/crypto/hash.h index 2d5ea9f9ff43..2aa83ee0ec98 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -12,6 +12,9 @@ #include <linux/crypto.h> #include <linux/string.h> +/* Set this bit for virtual address instead of SG list. */ +#define CRYPTO_AHASH_REQ_VIRT 0x00000001 + struct crypto_ahash; /** @@ -52,12 +55,12 @@ struct ahash_request { struct crypto_async_request base; unsigned int nbytes; - struct scatterlist *src; + union { + struct scatterlist *src; + const u8 *svirt; + }; u8 *result; - /* This field may only be used by the ahash API code. */ - void *priv; - void *__ctx[] CRYPTO_MINALIGN_ATTR; }; @@ -132,6 +135,7 @@ struct ahash_request { * This is a counterpart to @init_tfm, used to remove * various changes set in @init_tfm. * @clone_tfm: Copy transform into new object, may allocate memory. + * @reqsize: Size of the request context. 
* @halg: see struct hash_alg_common */ struct ahash_alg { @@ -148,6 +152,8 @@ struct ahash_alg { void (*exit_tfm)(struct crypto_ahash *tfm); int (*clone_tfm)(struct crypto_ahash *dst, struct crypto_ahash *src); + unsigned int reqsize; + struct hash_alg_common halg; }; @@ -575,16 +581,7 @@ static inline struct ahash_request *ahash_request_alloc_noprof( * ahash_request_free() - zeroize and free the request data structure * @req: request data structure cipher handle to be freed */ -static inline void ahash_request_free(struct ahash_request *req) -{ - kfree_sensitive(req); -} - -static inline void ahash_request_zero(struct ahash_request *req) -{ - memzero_explicit(req, sizeof(*req) + - crypto_ahash_reqsize(crypto_ahash_reqtfm(req))); -} +void ahash_request_free(struct ahash_request *req); static inline struct ahash_request *ahash_request_cast( struct crypto_async_request *req) @@ -622,9 +619,14 @@ static inline void ahash_request_set_callback(struct ahash_request *req, crypto_completion_t compl, void *data) { + u32 keep = CRYPTO_AHASH_REQ_VIRT; + req->base.complete = compl; req->base.data = data; - req->base.flags = flags; + flags &= ~keep; + req->base.flags &= keep; + req->base.flags |= flags; + crypto_reqchain_init(&req->base); } /** @@ -647,6 +649,36 @@ static inline void ahash_request_set_crypt(struct ahash_request *req, req->src = src; req->nbytes = nbytes; req->result = result; + req->base.flags &= ~CRYPTO_AHASH_REQ_VIRT; +} + +/** + * ahash_request_set_virt() - set virtual address data buffers + * @req: ahash_request handle to be updated + * @src: source virtual address + * @result: buffer that is filled with the message digest -- the caller must + * ensure that the buffer has sufficient space by, for example, calling + * crypto_ahash_digestsize() + * @nbytes: number of bytes to process from the source virtual address + * + * By using this call, the caller references the source virtual address. + * The source virtual address points to the data the message digest is to + * be calculated for. 
+ */ +static inline void ahash_request_set_virt(struct ahash_request *req, + const u8 *src, u8 *result, + unsigned int nbytes) +{ + req->svirt = src; + req->nbytes = nbytes; + req->result = result; + req->base.flags |= CRYPTO_AHASH_REQ_VIRT; +} + +static inline void ahash_request_chain(struct ahash_request *req, + struct ahash_request *head) +{ + crypto_request_chain(&req->base, &head->base); } /** @@ -950,4 +982,14 @@ static inline void shash_desc_zero(struct shash_desc *desc) sizeof(*desc) + crypto_shash_descsize(desc->tfm)); } +static inline int ahash_request_err(struct ahash_request *req) +{ + return req->base.err; +} + +static inline bool ahash_is_async(struct crypto_ahash *tfm) +{ + return crypto_tfm_is_async(&tfm->base); +} + #endif /* _CRYPTO_HASH_H */ diff --git a/include/crypto/hkdf.h b/include/crypto/hkdf.h new file mode 100644 index 000000000000..6a9678f508f5 --- /dev/null +++ b/include/crypto/hkdf.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * HKDF: HMAC-based Key Derivation Function (HKDF), RFC 5869 + * + * Extracted from fs/crypto/hkdf.c, which has + * Copyright 2019 Google LLC + */ + +#ifndef _CRYPTO_HKDF_H +#define _CRYPTO_HKDF_H + +#include <crypto/hash.h> + +int hkdf_extract(struct crypto_shash *hmac_tfm, const u8 *ikm, + unsigned int ikmlen, const u8 *salt, unsigned int saltlen, + u8 *prk); +int hkdf_expand(struct crypto_shash *hmac_tfm, + const u8 *info, unsigned int infolen, + u8 *okm, unsigned int okmlen); +#endif diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index 8831edaafc05..aaf59f3236fa 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -12,12 +12,17 @@ #include <crypto/acompress.h> #include <crypto/algapi.h> +#define ACOMP_REQUEST_ON_STACK(name, tfm) \ + char __##name##_req[sizeof(struct acomp_req) + \ + MAX_SYNC_COMP_REQSIZE] CRYPTO_MINALIGN_ATTR; \ + struct acomp_req *name = acomp_request_on_stack_init( \ + __##name##_req, (tfm), 0, true) + /** * struct acomp_alg - asynchronous compression algorithm * * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation - * @dst_free: Frees destination buffer if allocated inside the algorithm * @init: Initialize the cryptographic transformation object. * This function is used to initialize the cryptographic * transformation object. This function is called only once at @@ -32,12 +37,12 @@ * * @reqsize: Context size for (de)compression requests * @base: Common crypto API algorithm data structure + * @stream: Per-cpu memory for algorithm * @calg: Cmonn algorithm data structure shared with scomp */ struct acomp_alg { int (*compress)(struct acomp_req *req); int (*decompress)(struct acomp_req *req); - void (*dst_free)(struct scatterlist *dst); int (*init)(struct crypto_acomp *tfm); void (*exit)(struct crypto_acomp *tfm); @@ -68,22 +73,6 @@ static inline void acomp_request_complete(struct acomp_req *req, crypto_request_complete(&req->base, err); } -static inline struct acomp_req *__acomp_request_alloc_noprof(struct crypto_acomp *tfm) -{ - struct acomp_req *req; - - req = kzalloc_noprof(sizeof(*req) + crypto_acomp_reqsize(tfm), GFP_KERNEL); - if (likely(req)) - acomp_request_set_tfm(req, tfm); - return req; -} -#define __acomp_request_alloc(...) 
alloc_hooks(__acomp_request_alloc_noprof(__VA_ARGS__)) - -static inline void __acomp_request_free(struct acomp_req *req) -{ - kfree_sensitive(req); -} - /** * crypto_register_acomp() -- Register asynchronous compression algorithm * @@ -109,4 +98,64 @@ void crypto_unregister_acomp(struct acomp_alg *alg); int crypto_register_acomps(struct acomp_alg *algs, int count); void crypto_unregister_acomps(struct acomp_alg *algs, int count); +static inline bool acomp_request_chained(struct acomp_req *req) +{ + return crypto_request_chained(&req->base); +} + +static inline bool acomp_request_issg(struct acomp_req *req) +{ + return !(req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT | + CRYPTO_ACOMP_REQ_DST_VIRT | + CRYPTO_ACOMP_REQ_SRC_FOLIO | + CRYPTO_ACOMP_REQ_DST_FOLIO)); +} + +static inline bool acomp_request_src_isvirt(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_SRC_VIRT; +} + +static inline bool acomp_request_dst_isvirt(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_DST_VIRT; +} + +static inline bool acomp_request_isvirt(struct acomp_req *req) +{ + return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_VIRT | + CRYPTO_ACOMP_REQ_DST_VIRT); +} + +static inline bool acomp_request_src_isnondma(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_SRC_NONDMA; +} + +static inline bool acomp_request_dst_isnondma(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_DST_NONDMA; +} + +static inline bool acomp_request_isnondma(struct acomp_req *req) +{ + return req->base.flags & (CRYPTO_ACOMP_REQ_SRC_NONDMA | + CRYPTO_ACOMP_REQ_DST_NONDMA); +} + +static inline bool acomp_request_src_isfolio(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_SRC_FOLIO; +} + +static inline bool acomp_request_dst_isfolio(struct acomp_req *req) +{ + return req->base.flags & CRYPTO_ACOMP_REQ_DST_FOLIO; +} + +static inline bool crypto_acomp_req_chain(struct crypto_acomp *tfm) +{ + return crypto_tfm_req_chain(&tfm->base); +} + #endif diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 84da3424decc..485e22cf517e 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -247,5 +247,20 @@ static inline struct crypto_shash *__crypto_shash_cast(struct crypto_tfm *tfm) return container_of(tfm, struct crypto_shash, base); } +static inline bool ahash_request_chained(struct ahash_request *req) +{ + return crypto_request_chained(&req->base); +} + +static inline bool ahash_request_isvirt(struct ahash_request *req) +{ + return req->base.flags & CRYPTO_AHASH_REQ_VIRT; +} + +static inline bool crypto_ahash_req_chain(struct crypto_ahash *tfm) +{ + return crypto_tfm_req_chain(&tfm->base); +} + #endif /* _CRYPTO_INTERNAL_HASH_H */ diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h index 07a10fd2d321..f25aa2ea3b48 100644 --- a/include/crypto/internal/scompress.h +++ b/include/crypto/internal/scompress.h @@ -12,8 +12,6 @@ #include <crypto/acompress.h> #include <crypto/algapi.h> -#define SCOMP_SCRATCH_SIZE 131072 - struct acomp_req; struct crypto_scomp { @@ -28,11 +26,12 @@ struct crypto_scomp { * @compress: Function performs a compress operation * @decompress: Function performs a de-compress operation * @base: Common crypto API algorithm data structure + * @stream: Per-cpu memory for algorithm * @calg: Cmonn algorithm data structure shared with acomp */ struct scomp_alg { - void *(*alloc_ctx)(struct crypto_scomp *tfm); - void (*free_ctx)(struct crypto_scomp *tfm, void 
*ctx); + void *(*alloc_ctx)(void); + void (*free_ctx)(void *ctx); int (*compress)(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx); @@ -71,17 +70,6 @@ static inline struct scomp_alg *crypto_scomp_alg(struct crypto_scomp *tfm) return __crypto_scomp_alg(crypto_scomp_tfm(tfm)->__crt_alg); } -static inline void *crypto_scomp_alloc_ctx(struct crypto_scomp *tfm) -{ - return crypto_scomp_alg(tfm)->alloc_ctx(tfm); -} - -static inline void crypto_scomp_free_ctx(struct crypto_scomp *tfm, - void *ctx) -{ - return crypto_scomp_alg(tfm)->free_ctx(tfm, ctx); -} - static inline int crypto_scomp_compress(struct crypto_scomp *tfm, const u8 *src, unsigned int slen, u8 *dst, unsigned int *dlen, void *ctx) diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h index 4f49621d3eb6..a958ab0636ad 100644 --- a/include/crypto/internal/skcipher.h +++ b/include/crypto/internal/skcipher.h @@ -56,15 +56,31 @@ struct crypto_lskcipher_spawn { struct skcipher_walk { union { + /* Virtual address of the source. */ struct { - void *addr; - } virt; - } src, dst; + struct { + const void *const addr; + } virt; + } src; + + /* Private field for the API, do not use. */ + struct scatter_walk in; + }; - struct scatter_walk in; unsigned int nbytes; - struct scatter_walk out; + union { + /* Virtual address of the destination. */ + struct { + struct { + void *const addr; + } virt; + } dst; + + /* Private field for the API, do not use. */ + struct scatter_walk out; + }; + unsigned int total; u8 *page; @@ -197,13 +213,15 @@ int lskcipher_register_instance(struct crypto_template *tmpl, struct lskcipher_instance *inst); int skcipher_walk_done(struct skcipher_walk *walk, int res); -int skcipher_walk_virt(struct skcipher_walk *walk, - struct skcipher_request *req, +int skcipher_walk_virt(struct skcipher_walk *__restrict walk, + struct skcipher_request *__restrict req, bool atomic); -int skcipher_walk_aead_encrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); -int skcipher_walk_aead_decrypt(struct skcipher_walk *walk, - struct aead_request *req, bool atomic); +int skcipher_walk_aead_encrypt(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic); +int skcipher_walk_aead_decrypt(struct skcipher_walk *__restrict walk, + struct aead_request *__restrict req, + bool atomic); static inline void skcipher_walk_abort(struct skcipher_walk *walk) { diff --git a/include/crypto/krb5.h b/include/crypto/krb5.h new file mode 100644 index 000000000000..62d998e62f47 --- /dev/null +++ b/include/crypto/krb5.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Kerberos 5 crypto + * + * Copyright (C) 2025 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _CRYPTO_KRB5_H +#define _CRYPTO_KRB5_H + +#include <linux/crypto.h> +#include <crypto/aead.h> +#include <crypto/hash.h> + +struct crypto_shash; +struct scatterlist; + +/* + * Per Kerberos v5 protocol spec crypto types from the wire. These get mapped + * to linux kernel crypto routines. + */ +#define KRB5_ENCTYPE_NULL 0x0000 +#define KRB5_ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */ +#define KRB5_ENCTYPE_DES_CBC_MD4 0x0002 /* DES cbc mode with RSA-MD4 */ +#define KRB5_ENCTYPE_DES_CBC_MD5 0x0003 /* DES cbc mode with RSA-MD5 */ +#define KRB5_ENCTYPE_DES_CBC_RAW 0x0004 /* DES cbc mode raw */ +/* XXX deprecated? 
*/ +#define KRB5_ENCTYPE_DES3_CBC_SHA 0x0005 /* DES-3 cbc mode with NIST-SHA */ +#define KRB5_ENCTYPE_DES3_CBC_RAW 0x0006 /* DES-3 cbc mode raw */ +#define KRB5_ENCTYPE_DES_HMAC_SHA1 0x0008 +#define KRB5_ENCTYPE_DES3_CBC_SHA1 0x0010 +#define KRB5_ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011 +#define KRB5_ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012 +#define KRB5_ENCTYPE_AES128_CTS_HMAC_SHA256_128 0x0013 +#define KRB5_ENCTYPE_AES256_CTS_HMAC_SHA384_192 0x0014 +#define KRB5_ENCTYPE_ARCFOUR_HMAC 0x0017 +#define KRB5_ENCTYPE_ARCFOUR_HMAC_EXP 0x0018 +#define KRB5_ENCTYPE_CAMELLIA128_CTS_CMAC 0x0019 +#define KRB5_ENCTYPE_CAMELLIA256_CTS_CMAC 0x001a +#define KRB5_ENCTYPE_UNKNOWN 0x01ff + +#define KRB5_CKSUMTYPE_CRC32 0x0001 +#define KRB5_CKSUMTYPE_RSA_MD4 0x0002 +#define KRB5_CKSUMTYPE_RSA_MD4_DES 0x0003 +#define KRB5_CKSUMTYPE_DESCBC 0x0004 +#define KRB5_CKSUMTYPE_RSA_MD5 0x0007 +#define KRB5_CKSUMTYPE_RSA_MD5_DES 0x0008 +#define KRB5_CKSUMTYPE_NIST_SHA 0x0009 +#define KRB5_CKSUMTYPE_HMAC_SHA1_DES3 0x000c +#define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f +#define KRB5_CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010 +#define KRB5_CKSUMTYPE_CMAC_CAMELLIA128 0x0011 +#define KRB5_CKSUMTYPE_CMAC_CAMELLIA256 0x0012 +#define KRB5_CKSUMTYPE_HMAC_SHA256_128_AES128 0x0013 +#define KRB5_CKSUMTYPE_HMAC_SHA384_192_AES256 0x0014 +#define KRB5_CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */ + +/* + * Constants used for key derivation + */ +/* from rfc3961 */ +#define KEY_USAGE_SEED_CHECKSUM (0x99) +#define KEY_USAGE_SEED_ENCRYPTION (0xAA) +#define KEY_USAGE_SEED_INTEGRITY (0x55) + +/* + * Mode of operation. + */ +enum krb5_crypto_mode { + KRB5_CHECKSUM_MODE, /* Checksum only */ + KRB5_ENCRYPT_MODE, /* Fully encrypted, possibly with integrity checksum */ +}; + +struct krb5_buffer { + unsigned int len; + void *data; +}; + +/* + * Kerberos encoding type definition. 
+ */ +struct krb5_enctype { + int etype; /* Encryption (key) type */ + int ctype; /* Checksum type */ + const char *name; /* "Friendly" name */ + const char *encrypt_name; /* Crypto encrypt+checksum name */ + const char *cksum_name; /* Crypto checksum name */ + const char *hash_name; /* Crypto hash name */ + const char *derivation_enc; /* Cipher used in key derivation */ + u16 block_len; /* Length of encryption block */ + u16 conf_len; /* Length of confounder (normally == block_len) */ + u16 cksum_len; /* Length of checksum */ + u16 key_bytes; /* Length of raw key, in bytes */ + u16 key_len; /* Length of final key, in bytes */ + u16 hash_len; /* Length of hash in bytes */ + u16 prf_len; /* Length of PRF() result in bytes */ + u16 Kc_len; /* Length of Kc in bytes */ + u16 Ke_len; /* Length of Ke in bytes */ + u16 Ki_len; /* Length of Ki in bytes */ + bool keyed_cksum; /* T if a keyed cksum */ + + const struct krb5_crypto_profile *profile; + + int (*random_to_key)(const struct krb5_enctype *krb5, + const struct krb5_buffer *in, + struct krb5_buffer *out); /* complete key generation */ +}; + +/* + * krb5_api.c + */ +const struct krb5_enctype *crypto_krb5_find_enctype(u32 enctype); +size_t crypto_krb5_how_much_buffer(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t data_size, size_t *_offset); +size_t crypto_krb5_how_much_data(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t *_buffer_size, size_t *_offset); +void crypto_krb5_where_is_the_data(const struct krb5_enctype *krb5, + enum krb5_crypto_mode mode, + size_t *_offset, size_t *_len); +struct crypto_aead *crypto_krb5_prepare_encryption(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + u32 usage, gfp_t gfp); +struct crypto_shash *crypto_krb5_prepare_checksum(const struct krb5_enctype *krb5, + const struct krb5_buffer *TK, + u32 usage, gfp_t gfp); +ssize_t crypto_krb5_encrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len, + bool preconfounded); +int crypto_krb5_decrypt(const struct krb5_enctype *krb5, + struct crypto_aead *aead, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); +ssize_t crypto_krb5_get_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t sg_len, + size_t data_offset, size_t data_len); +int crypto_krb5_verify_mic(const struct krb5_enctype *krb5, + struct crypto_shash *shash, + const struct krb5_buffer *metadata, + struct scatterlist *sg, unsigned int nr_sg, + size_t *_offset, size_t *_len); + +/* + * krb5_kdf.c + */ +int crypto_krb5_calc_PRFplus(const struct krb5_enctype *krb5, + const struct krb5_buffer *K, + unsigned int L, + const struct krb5_buffer *S, + struct krb5_buffer *result, + gfp_t gfp); + +#endif /* _CRYPTO_KRB5_H */ diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h index 32fc4473175b..94a8585f26b2 100644 --- a/include/crypto/scatterwalk.h +++ b/include/crypto/scatterwalk.h @@ -26,76 +26,218 @@ static inline void scatterwalk_crypto_chain(struct scatterlist *head, sg_mark_end(head); } -static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk) +static inline void scatterwalk_start(struct scatter_walk *walk, + struct scatterlist *sg) +{ + walk->sg = sg; + walk->offset = sg->offset; +} + +/* + * This is equivalent to scatterwalk_start(walk, sg) followed by + * 
scatterwalk_skip(walk, pos). + */ +static inline void scatterwalk_start_at_pos(struct scatter_walk *walk, + struct scatterlist *sg, + unsigned int pos) { - unsigned int len = walk->sg->offset + walk->sg->length - walk->offset; - unsigned int len_this_page = offset_in_page(~walk->offset) + 1; - return len_this_page > len ? len : len_this_page; + while (pos > sg->length) { + pos -= sg->length; + sg = sg_next(sg); + } + walk->sg = sg; + walk->offset = sg->offset + pos; } static inline unsigned int scatterwalk_clamp(struct scatter_walk *walk, unsigned int nbytes) { - unsigned int len_this_page = scatterwalk_pagelen(walk); - return nbytes > len_this_page ? len_this_page : nbytes; + unsigned int len_this_sg; + unsigned int limit; + + if (walk->offset >= walk->sg->offset + walk->sg->length) + scatterwalk_start(walk, sg_next(walk->sg)); + len_this_sg = walk->sg->offset + walk->sg->length - walk->offset; + + /* + * HIGHMEM case: the page may have to be mapped into memory. To avoid + * the complexity of having to map multiple pages at once per sg entry, + * clamp the returned length to not cross a page boundary. + * + * !HIGHMEM case: no mapping is needed; all pages of the sg entry are + * already mapped contiguously in the kernel's direct map. For improved + * performance, allow the walker to return data segments that cross a + * page boundary. Do still cap the length to PAGE_SIZE, since some + * users rely on that to avoid disabling preemption for too long when + * using SIMD. It's also needed for when skcipher_walk uses a bounce + * page due to the data not being aligned to the algorithm's alignmask. + */ + if (IS_ENABLED(CONFIG_HIGHMEM)) + limit = PAGE_SIZE - offset_in_page(walk->offset); + else + limit = PAGE_SIZE; + + return min3(nbytes, len_this_sg, limit); } -static inline void scatterwalk_advance(struct scatter_walk *walk, - unsigned int nbytes) +/* + * Create a scatterlist that represents the remaining data in a walk. Uses + * chaining to reference the original scatterlist, so this uses at most two + * entries in @sg_out regardless of the number of entries in the original list. + * Assumes that sg_init_table() was already done. + */ +static inline void scatterwalk_get_sglist(struct scatter_walk *walk, + struct scatterlist sg_out[2]) { - walk->offset += nbytes; + if (walk->offset >= walk->sg->offset + walk->sg->length) + scatterwalk_start(walk, sg_next(walk->sg)); + sg_set_page(sg_out, sg_page(walk->sg), + walk->sg->offset + walk->sg->length - walk->offset, + walk->offset); + scatterwalk_crypto_chain(sg_out, sg_next(walk->sg), 2); } -static inline struct page *scatterwalk_page(struct scatter_walk *walk) +static inline void scatterwalk_map(struct scatter_walk *walk) { - return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); + struct page *base_page = sg_page(walk->sg); + unsigned int offset = walk->offset; + void *addr; + + if (IS_ENABLED(CONFIG_HIGHMEM)) { + struct page *page; + + page = nth_page(base_page, offset >> PAGE_SHIFT); + offset = offset_in_page(offset); + addr = kmap_local_page(page) + offset; + } else { + /* + * When !HIGHMEM we allow the walker to return segments that + * span a page boundary; see scatterwalk_clamp(). To make it + * clear that in this case we're working in the linear buffer of + * the whole sg entry in the kernel's direct map rather than + * within the mapped buffer of a single page, compute the + * address as an offset from the page_address() of the first + * page of the sg entry. 
Either way the result is the address + * in the direct map, but this makes it clearer what is really + * going on. + */ + addr = page_address(base_page) + offset; + } + + walk->__addr = addr; } -static inline void scatterwalk_unmap(void *vaddr) +/** + * scatterwalk_next() - Get the next data buffer in a scatterlist walk + * @walk: the scatter_walk + * @total: the total number of bytes remaining, > 0 + * + * A virtual address for the next segment of data from the scatterlist will + * be placed into @walk->addr. The caller must call scatterwalk_done_src() + * or scatterwalk_done_dst() when it is done using this virtual address. + * + * Returns: the next number of bytes available, <= @total + */ +static inline unsigned int scatterwalk_next(struct scatter_walk *walk, + unsigned int total) { - kunmap_local(vaddr); + unsigned int nbytes = scatterwalk_clamp(walk, total); + + scatterwalk_map(walk); + return nbytes; } -static inline void scatterwalk_start(struct scatter_walk *walk, - struct scatterlist *sg) +static inline void scatterwalk_unmap(struct scatter_walk *walk) { - walk->sg = sg; - walk->offset = sg->offset; + if (IS_ENABLED(CONFIG_HIGHMEM)) + kunmap_local(walk->__addr); } -static inline void *scatterwalk_map(struct scatter_walk *walk) +static inline void scatterwalk_advance(struct scatter_walk *walk, + unsigned int nbytes) { - return kmap_local_page(scatterwalk_page(walk)) + - offset_in_page(walk->offset); + walk->offset += nbytes; } -static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out, - unsigned int more) +/** + * scatterwalk_done_src() - Finish one step of a walk of source scatterlist + * @walk: the scatter_walk + * @nbytes: the number of bytes processed this step, less than or equal to the + * number of bytes that scatterwalk_next() returned. + * + * Use this if the mapped address was not written to, i.e. it is source data. + */ +static inline void scatterwalk_done_src(struct scatter_walk *walk, + unsigned int nbytes) { - if (out) { - struct page *page; - - page = sg_page(walk->sg) + ((walk->offset - 1) >> PAGE_SHIFT); - flush_dcache_page(page); - } - - if (more && walk->offset >= walk->sg->offset + walk->sg->length) - scatterwalk_start(walk, sg_next(walk->sg)); + scatterwalk_unmap(walk); + scatterwalk_advance(walk, nbytes); } -static inline void scatterwalk_done(struct scatter_walk *walk, int out, - int more) +/** + * scatterwalk_done_dst() - Finish one step of a walk of destination scatterlist + * @walk: the scatter_walk + * @nbytes: the number of bytes processed this step, less than or equal to the + * number of bytes that scatterwalk_next() returned. + * + * Use this if the mapped address may have been written to, i.e. it is + * destination data. + */ +static inline void scatterwalk_done_dst(struct scatter_walk *walk, + unsigned int nbytes) { - if (!more || walk->offset >= walk->sg->offset + walk->sg->length || - !(walk->offset & (PAGE_SIZE - 1))) - scatterwalk_pagedone(walk, out, more); + scatterwalk_unmap(walk); + /* + * Explicitly check ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE instead of just + * relying on flush_dcache_page() being a no-op when not implemented, + * since otherwise the BUG_ON in sg_page() does not get optimized out. + * This also avoids having to consider whether the loop would get + * reliably optimized out or not. 
+ */ + if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE) { + struct page *base_page; + unsigned int offset; + int start, end, i; + + base_page = sg_page(walk->sg); + offset = walk->offset; + start = offset >> PAGE_SHIFT; + end = start + (nbytes >> PAGE_SHIFT); + end += (offset_in_page(offset) + offset_in_page(nbytes) + + PAGE_SIZE - 1) >> PAGE_SHIFT; + for (i = start; i < end; i++) + flush_dcache_page(nth_page(base_page, i)); + } + scatterwalk_advance(walk, nbytes); } -void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, - size_t nbytes, int out); +void scatterwalk_skip(struct scatter_walk *walk, unsigned int nbytes); + +void memcpy_from_scatterwalk(void *buf, struct scatter_walk *walk, + unsigned int nbytes); + +void memcpy_to_scatterwalk(struct scatter_walk *walk, const void *buf, + unsigned int nbytes); -void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, - unsigned int start, unsigned int nbytes, int out); +void memcpy_from_sglist(void *buf, struct scatterlist *sg, + unsigned int start, unsigned int nbytes); + +void memcpy_to_sglist(struct scatterlist *sg, unsigned int start, + const void *buf, unsigned int nbytes); + +void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src, + unsigned int nbytes); + +/* In new code, please use memcpy_{from,to}_sglist() directly instead. */ +static inline void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, + unsigned int start, + unsigned int nbytes, int out) +{ + if (out) + memcpy_to_sglist(sg, start, buf, nbytes); + else + memcpy_from_sglist(buf, sg, start, nbytes); +} struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2], struct scatterlist *src, diff --git a/include/crypto/sig.h b/include/crypto/sig.h index cff41ad93824..11024708c069 100644 --- a/include/crypto/sig.h +++ b/include/crypto/sig.h @@ -23,7 +23,8 @@ struct crypto_sig { * struct sig_alg - generic public key signature algorithm * * @sign: Function performs a sign operation as defined by public key - * algorithm. Optional. + * algorithm. On success, the signature size is returned. + * Optional. * @verify: Function performs a complete verify operation as defined by * public key algorithm, returning verification status. Optional. * @set_pub_key: Function invokes the algorithm specific set public key @@ -186,7 +187,7 @@ static inline unsigned int crypto_sig_maxsize(struct crypto_sig *tfm) * @dst: destination obuffer * @dlen: destination length * - * Return: zero on success; error code in case of error + * Return: signature size on success; error code in case of error */ static inline int crypto_sig_sign(struct crypto_sig *tfm, const void *src, unsigned int slen, diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h index 18a86e0af016..9e5853464345 100644 --- a/include/crypto/skcipher.h +++ b/include/crypto/skcipher.h @@ -214,16 +214,17 @@ struct lskcipher_alg { #define MAX_SYNC_SKCIPHER_REQSIZE 384 /* - * This performs a type-check against the "tfm" argument to make sure + * This performs a type-check against the "_tfm" argument to make sure * all users have the correct skcipher tfm for doing on-stack requests. 
*/ -#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, tfm) \ +#define SYNC_SKCIPHER_REQUEST_ON_STACK(name, _tfm) \ char __##name##_desc[sizeof(struct skcipher_request) + \ - MAX_SYNC_SKCIPHER_REQSIZE + \ - (!(sizeof((struct crypto_sync_skcipher *)1 == \ - (typeof(tfm))1))) \ + MAX_SYNC_SKCIPHER_REQSIZE \ ] CRYPTO_MINALIGN_ATTR; \ - struct skcipher_request *name = (void *)__##name##_desc + struct skcipher_request *name = \ + (((struct skcipher_request *)__##name##_desc)->base.tfm = \ + crypto_sync_skcipher_tfm((_tfm)), \ + (void *)__##name##_desc) /** * DOC: Symmetric Key Cipher API @@ -311,6 +312,12 @@ static inline struct crypto_tfm *crypto_lskcipher_tfm( return &tfm->base; } +static inline struct crypto_tfm *crypto_sync_skcipher_tfm( + struct crypto_sync_skcipher *tfm) +{ + return crypto_skcipher_tfm(&tfm->base); +} + /** * crypto_free_skcipher() - zeroize and free cipher handle * @tfm: cipher handle to be freed |