/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2019 Google LLC
 */

#ifndef __LINUX_BLK_CRYPTO_INTERNAL_H
#define __LINUX_BLK_CRYPTO_INTERNAL_H

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Represents a crypto mode supported by blk-crypto */
struct blk_crypto_mode {
	unsigned int keysize; /* key size in bytes */
	unsigned int ivsize; /* iv size in bytes */
};
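
/*
 * Illustrative only (hypothetical entry; the real mode table lives in
 * blk-crypto.c): an AES-256-XTS mode would pair a 64-byte key (two
 * 256-bit halves, as XTS requires) with a 16-byte IV:
 *
 *	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
 *		.keysize = 64,
 *		.ivsize = 16,
 *	},
 */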

#ifdef CONFIG_BLK_INLINE_ENCRYPTION

void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc);

bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio);

bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2);

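/*
 * A bio may be merged into a request (or two requests merged) only if any
 * crypt contexts involved use the same key and their data unit numbers
 * (DUNs) are contiguous across the merge point; bio_crypt_ctx_mergeable()
 * performs that check.  "Back" means the bio follows @req's data, "front"
 * means it precedes it.
 */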
static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       bio->bi_crypt_context);
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return bio_crypt_ctx_mergeable(bio->bi_crypt_context,
				       bio->bi_iter.bi_size, req->crypt_ctx);
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return bio_crypt_ctx_mergeable(req->crypt_ctx, blk_rq_bytes(req),
				       next->crypt_ctx);
}

static inline void blk_crypto_rq_set_defaults(struct request *rq)
{
	rq->crypt_ctx = NULL;
	rq->crypt_keyslot = NULL;
}

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return rq->crypt_ctx;
}

#else /* CONFIG_BLK_INLINE_ENCRYPTION */

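/*
 * Inline encryption is compiled out: provide no-op stubs so that callers
 * don't need #ifdefs.  Merges are never blocked by crypto, and no request
 * is ever considered encrypted.
 */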
static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
					       struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_front_mergeable(struct request *req,
						 struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_back_mergeable(struct request *req,
						struct bio *bio)
{
	return true;
}

static inline bool bio_crypt_ctx_merge_rq(struct request *req,
					  struct request *next)
{
	return true;
}

static inline void blk_crypto_rq_set_defaults(struct request *rq) { }

static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
{
	return false;
}

#endif /* CONFIG_BLK_INLINE_ENCRYPTION */

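/*
 * Advance the bio's crypt context by @bytes, i.e. increment its DUN by the
 * number of data units spanned (e.g. advancing 8192 bytes with 4096-byte
 * data units increments the DUN by 2).
 */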
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_advance(bio, bytes);
}

void __bio_crypt_free_ctx(struct bio *bio);
static inline void bio_crypt_free_ctx(struct bio *bio)
{
	if (bio_has_crypt_ctx(bio))
		__bio_crypt_free_ctx(bio);
}

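/*
 * After a front merge the request starts where @bio starts, so the request's
 * crypt context must take on the bio's starting DUN.
 */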
static inline void bio_crypt_do_front_merge(struct request *rq,
					    struct bio *bio)
{
#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	if (bio_has_crypt_ctx(bio))
		memcpy(rq->crypt_ctx->bc_dun, bio->bi_crypt_context->bc_dun,
		       sizeof(rq->crypt_ctx->bc_dun));
#endif
}

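/*
 * blk_crypto_bio_prep() belongs in the bio submission path.  If it returns
 * false, the bio has already been ended with an error and must not be
 * submitted, so a caller (sketch; enqueue_bio() is hypothetical) would do:
 *
 *	if (!blk_crypto_bio_prep(&bio))
 *		return;
 *	enqueue_bio(bio);
 */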
bool __blk_crypto_bio_prep(struct bio **bio_ptr);
static inline bool blk_crypto_bio_prep(struct bio **bio_ptr)
{
	if (bio_has_crypt_ctx(*bio_ptr))
		return __blk_crypto_bio_prep(bio_ptr);
	return true;
}

blk_status_t __blk_crypto_init_request(struct request *rq);
static inline blk_status_t blk_crypto_init_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return __blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

void __blk_crypto_free_request(struct request *rq);
static inline void blk_crypto_free_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		__blk_crypto_free_request(rq);
}

void __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			      gfp_t gfp_mask);
static inline void blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
					  gfp_t gfp_mask)
{
	if (bio_has_crypt_ctx(bio))
		__blk_crypto_rq_bio_prep(rq, bio, gfp_mask);
}

/**
 * blk_crypto_insert_cloned_request - Prepare a cloned request to be inserted
 *				      into a request queue.
 * @rq: the request being queued
 *
 * Return: BLK_STS_OK on success, nonzero on error.
 */
static inline blk_status_t blk_crypto_insert_cloned_request(struct request *rq)
{
	if (blk_crypto_rq_is_encrypted(rq))
		return blk_crypto_init_request(rq);
	return BLK_STS_OK;
}

#endif /* __LINUX_BLK_CRYPTO_INTERNAL_H */