author     Herbert Xu <herbert@gondor.apana.org.au>   2014-05-21 20:56:12 +0800
committer  Herbert Xu <herbert@gondor.apana.org.au>   2014-05-21 20:56:12 +0800
commit     75ecb231ff45b54afa9f4ec9137965c3c00868f4 (patch)
tree       eb185e64a01a81de14fdc3e380f7273b09bf76a4 /crypto
parent     0118a552137506a68ac062981967d8b5147e6028 (diff)
download   lwn-75ecb231ff45b54afa9f4ec9137965c3c00868f4.tar.gz
           lwn-75ecb231ff45b54afa9f4ec9137965c3c00868f4.zip
crypto: hash - Add real ahash walk interface
Although the existing hash walk interface has already been used by a number of ahash crypto drivers, it turns out that none of them were really asynchronous. They were all essentially polling for completion. That's why nobody has noticed until now that the walk interface couldn't work with a real asynchronous driver since the memory is mapped using kmap_atomic.

As we now have a use-case for a real ahash implementation on x86, this patch creates a minimal ahash walk interface. Basically it just calls kmap instead of kmap_atomic and does away with the crypto_yield call. Real ahash crypto drivers don't need to yield since by definition they won't be hogging the CPU.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
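For illustration only (not part of the patch itself): a genuinely asynchronous driver's ->update() handler could consume a request with the new walk roughly as sketched below. struct my_ctx and my_hw_feed() are hypothetical stand-ins for driver-specific state and hardware submission code.

#include <crypto/internal/hash.h>

struct my_ctx;					/* hypothetical driver state */
void my_hw_feed(struct my_ctx *ctx, const void *data, unsigned int len);

static int my_ahash_update(struct ahash_request *req)
{
	struct my_ctx *ctx = ahash_request_ctx(req);
	struct crypto_hash_walk walk;
	int nbytes;

	/*
	 * crypto_ahash_walk_first() sets CRYPTO_ALG_ASYNC in walk->flags,
	 * so each segment is mapped with kmap() instead of kmap_atomic()
	 * and crypto_hash_walk_done() skips the crypto_yield() call.
	 */
	for (nbytes = crypto_ahash_walk_first(req, &walk); nbytes > 0;
	     nbytes = crypto_hash_walk_done(&walk, 0))
		my_hw_feed(ctx, walk.data, nbytes);	/* nbytes bytes at walk.data */

	return nbytes;	/* 0 on success, negative errno on walk failure */
}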
Diffstat (limited to 'crypto')
-rw-r--r--   crypto/ahash.c   41
1 files changed, 36 insertions, 5 deletions
diff --git a/crypto/ahash.c b/crypto/ahash.c
index 6e7223392e80..f2a5d8f656ff 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -15,6 +15,7 @@
 
 #include <crypto/internal/hash.h>
 #include <crypto/scatterwalk.h>
+#include <linux/bug.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
@@ -46,7 +47,10 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
 	unsigned int nbytes = min(walk->entrylen,
 				  ((unsigned int)(PAGE_SIZE)) - offset);
 
-	walk->data = kmap_atomic(walk->pg);
+	if (walk->flags & CRYPTO_ALG_ASYNC)
+		walk->data = kmap(walk->pg);
+	else
+		walk->data = kmap_atomic(walk->pg);
 	walk->data += offset;
 
 	if (offset & alignmask) {
@@ -93,8 +97,16 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 		return nbytes;
 	}
 
-	kunmap_atomic(walk->data);
-	crypto_yield(walk->flags);
+	if (walk->flags & CRYPTO_ALG_ASYNC)
+		kunmap(walk->pg);
+	else {
+		kunmap_atomic(walk->data);
+		/*
+		 * The may sleep test only makes sense for sync users.
+		 * Async users don't need to sleep here anyway.
+		 */
+		crypto_yield(walk->flags);
+	}
 
 	if (err)
 		return err;
@@ -124,12 +136,31 @@ int crypto_hash_walk_first(struct ahash_request *req,
 
 	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
 	walk->sg = req->src;
-	walk->flags = req->base.flags;
+	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
 
 	return hash_walk_new_entry(walk);
 }
 EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
 
+int crypto_ahash_walk_first(struct ahash_request *req,
+			    struct crypto_hash_walk *walk)
+{
+	walk->total = req->nbytes;
+
+	if (!walk->total)
+		return 0;
+
+	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
+	walk->sg = req->src;
+	walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK;
+	walk->flags |= CRYPTO_ALG_ASYNC;
+
+	BUILD_BUG_ON(CRYPTO_TFM_REQ_MASK & CRYPTO_ALG_ASYNC);
+
+	return hash_walk_new_entry(walk);
+}
+EXPORT_SYMBOL_GPL(crypto_ahash_walk_first);
+
 int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
 				  struct crypto_hash_walk *walk,
 				  struct scatterlist *sg, unsigned int len)
@@ -141,7 +172,7 @@ int crypto_hash_walk_first_compat(struct hash_desc *hdesc,
 
 	walk->alignmask = crypto_hash_alignmask(hdesc->tfm);
 	walk->sg = sg;
-	walk->flags = hdesc->flags;
+	walk->flags = hdesc->flags & CRYPTO_TFM_REQ_MASK;
 
 	return hash_walk_new_entry(walk);
 }