author    | Linus Torvalds <torvalds@linux-foundation.org> | 2023-08-31 15:32:18 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2023-08-31 15:32:18 -0700
commit    | f35d1706159e015848ec7421e91b44b614c02dc2 (patch)
tree      | 8a6604d318f604cddf8b84fd809cb1e43dd4553b /net
parent    | 8ae5d298ef2005da5454fc1680f983e85d3e1622 (diff)
parent    | b38a6023da6a12b561f0421c6a5a1f7624a1529c (diff)
Merge tag 'nfsd-6.6' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux
Pull nfsd updates from Chuck Lever:
"I'm thrilled to announce that the Linux in-kernel NFS server now
offers NFSv4 write delegations. A write delegation enables a client to
cache data and metadata for a single file more aggressively, reducing
network round trips and server workload. Many thanks to Dai Ngo for
contributing this facility, and to Jeff Layton and Neil Brown for
reviewing and testing it.
This release also sees the removal of all support for DES- and
triple-DES-based Kerberos encryption types in the kernel's SunRPC
implementation. These encryption types have been deprecated by the
Internet community for years and are considered insecure. This change
affects both the in-kernel NFS client and server.
The server's UDP and TCP socket transports have now fully adopted
David Howells' new bio_vec iterator so that no more than one sendmsg()
call is needed to transmit each RPC message. In particular, this helps
kTLS optimize record boundaries when sending RPC-with-TLS replies, and
it takes the server a baby step closer to handling file I/O via
folios.
We've begun work on overhauling the SunRPC thread scheduler to remove
a costly linked-list walk when looking for an idle RPC service thread
to wake. The prerequisites are included in this release. Thanks to
Neil Brown for his ongoing work on this improvement"
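The single-sendmsg() transmit path described in the pull message above is built on the kernel's bio_vec iterator. A minimal sketch of the idea, assuming a connected stream socket and an array of pages that already hold a fully marshalled RPC record, follows; the helper name, the fixed-size bvec array, and the page layout are illustrative assumptions, not the actual svcsock.c code.

#include <linux/bvec.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>

/* Illustrative helper only -- not the svcsock.c implementation. */
static int send_record_in_one_call(struct socket *sock, struct page **pages,
				   unsigned int npages, size_t total_len)
{
	struct bio_vec bvec[16];	/* assume npages <= 16 for this sketch */
	struct msghdr msg = {
		/* Let the transport reference the pages instead of copying them */
		.msg_flags = MSG_SPLICE_PAGES,
	};
	size_t remaining = total_len;
	unsigned int i;

	for (i = 0; i < npages; i++) {
		size_t len = min_t(size_t, remaining, PAGE_SIZE);

		bvec_set_page(&bvec[i], pages[i], len, 0);
		remaining -= len;
	}

	/* One iterator covering every page: the record leaves in one sendmsg() */
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bvec, npages, total_len);
	return sock_sendmsg(sock, &msg);
}

Handing the transport the whole reply in a single call is what lets kTLS place the TLS record boundary at the end of the RPC message, the optimization mentioned above.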
* tag 'nfsd-6.6' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux: (56 commits)
Documentation: Add missing documentation for EXPORT_OP flags
SUNRPC: Remove unused declaration rpc_modcount()
SUNRPC: Remove unused declarations
NFSD: da_addr_body field missing in some GETDEVICEINFO replies
SUNRPC: Remove return value of svc_pool_wake_idle_thread()
SUNRPC: make rqst_should_sleep() idempotent()
SUNRPC: Clean up svc_set_num_threads
SUNRPC: Count ingress RPC messages per svc_pool
SUNRPC: Deduplicate thread wake-up code
SUNRPC: Move trace_svc_xprt_enqueue
SUNRPC: Add enum svc_auth_status
SUNRPC: change svc_xprt::xpt_flags bits to enum
SUNRPC: change svc_rqst::rq_flags bits to enum
SUNRPC: change svc_pool::sp_flags bits to enum
SUNRPC: change cache_head.flags bits to enum
SUNRPC: remove timeout arg from svc_recv()
SUNRPC: change svc_recv() to return void.
SUNRPC: call svc_process() from svc_recv().
nfsd: separate nfsd_last_thread() from nfsd_put()
nfsd: Simplify code around svc_exit_thread() call in nfsd()
...
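Several of the commits listed above ("Deduplicate thread wake-up code", "Remove return value of svc_pool_wake_idle_thread()") converge on a single helper in net/sunrpc/svc.c. It is reproduced here from the svc.c hunk further below, with explanatory comments added:

void svc_pool_wake_idle_thread(struct svc_pool *pool)
{
	struct svc_rqst *rqstp;

	rcu_read_lock();
	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
		/* Claim the first non-busy thread; test_and_set_bit() makes
		 * the claim atomic with respect to concurrent callers. */
		if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
			continue;

		WRITE_ONCE(rqstp->rq_qtime, ktime_get());
		wake_up_process(rqstp->rq_task);
		rcu_read_unlock();
		percpu_counter_inc(&pool->sp_threads_woken);
		trace_svc_wake_up(rqstp->rq_task->pid);
		return;
	}
	rcu_read_unlock();

	/* Every thread was busy: record the congestion for later callers */
	set_bit(SP_CONGESTED, &pool->sp_flags);
}

Note that this is still a walk of pool->sp_all_threads; per the pull message, removing that walk is the goal of the ongoing scheduler work, and this release contains only the prerequisites.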
Diffstat (limited to 'net')
-rw-r--r-- | net/sunrpc/.kunitconfig                 |   1
-rw-r--r-- | net/sunrpc/Kconfig                      |  35
-rw-r--r-- | net/sunrpc/auth_gss/Makefile            |   2
-rw-r--r-- | net/sunrpc/auth_gss/gss_krb5_internal.h |  23
-rw-r--r-- | net/sunrpc/auth_gss/gss_krb5_keys.c     |  84
-rw-r--r-- | net/sunrpc/auth_gss/gss_krb5_mech.c     | 257
-rw-r--r-- | net/sunrpc/auth_gss/gss_krb5_seal.c     |  69
-rw-r--r-- | net/sunrpc/auth_gss/gss_krb5_seqnum.c   | 106
-rw-r--r-- | net/sunrpc/auth_gss/gss_krb5_test.c     | 196
-rw-r--r-- | net/sunrpc/auth_gss/gss_krb5_unseal.c   |  77
-rw-r--r-- | net/sunrpc/auth_gss/gss_krb5_wrap.c     | 287
-rw-r--r-- | net/sunrpc/auth_gss/svcauth_gss.c       |   7
-rw-r--r-- | net/sunrpc/svc.c                        |  97
-rw-r--r-- | net/sunrpc/svc_xprt.c                   | 126
-rw-r--r-- | net/sunrpc/svcauth.c                    |  35
-rw-r--r-- | net/sunrpc/svcauth_unix.c               |   9
-rw-r--r-- | net/sunrpc/svcsock.c                    | 131
-rw-r--r-- | net/sunrpc/xdr.c                        |  50
18 files changed, 252 insertions, 1340 deletions
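Two other commits above change how a service thread drives its transport: svc_recv() now takes only the rqstp, returns void, and calls svc_process() itself (see the svc_xprt.c hunk below). Under those changes a service thread's main loop reduces to roughly the following sketch; the function name is hypothetical, and real threads such as nfsd() carry additional setup and shutdown logic.

static int example_svc_thread(void *data)
{
	/* The kthread data pointer is the svc_rqst, as set up by
	 * svc_start_kthreads() in the svc.c hunk below. */
	struct svc_rqst *rqstp = data;

	while (!kthread_should_stop()) {
		/* Waits for work, receives the next request (if any), and
		 * dispatches it internally via svc_process(). */
		svc_recv(rqstp);
	}

	svc_exit_thread(rqstp);
	return 0;
}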
diff --git a/net/sunrpc/.kunitconfig b/net/sunrpc/.kunitconfig index a55a00fa649b..eb02b906c295 100644 --- a/net/sunrpc/.kunitconfig +++ b/net/sunrpc/.kunitconfig @@ -23,7 +23,6 @@ CONFIG_NFS_FS=y CONFIG_SUNRPC=y CONFIG_SUNRPC_GSS=y CONFIG_RPCSEC_GSS_KRB5=y -CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_DES=y CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA=y CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2=y diff --git a/net/sunrpc/Kconfig b/net/sunrpc/Kconfig index 4afc5fd71d44..2d8b67dac7b5 100644 --- a/net/sunrpc/Kconfig +++ b/net/sunrpc/Kconfig @@ -34,38 +34,6 @@ config RPCSEC_GSS_KRB5 If unsure, say Y. -config RPCSEC_GSS_KRB5_SIMPLIFIED - bool - depends on RPCSEC_GSS_KRB5 - -config RPCSEC_GSS_KRB5_CRYPTOSYSTEM - bool - depends on RPCSEC_GSS_KRB5 - -config RPCSEC_GSS_KRB5_ENCTYPES_DES - bool "Enable Kerberos enctypes based on DES (deprecated)" - depends on RPCSEC_GSS_KRB5 - depends on CRYPTO_CBC && CRYPTO_CTS && CRYPTO_ECB - depends on CRYPTO_HMAC && CRYPTO_MD5 && CRYPTO_SHA1 - depends on CRYPTO_DES - default n - select RPCSEC_GSS_KRB5_SIMPLIFIED - help - Choose Y to enable the use of deprecated Kerberos 5 - encryption types that utilize Data Encryption Standard - (DES) based ciphers. These include des-cbc-md5, - des-cbc-crc, and des-cbc-md4, which were deprecated by - RFC 6649, and des3-cbc-sha1, which was deprecated by RFC - 8429. - - These encryption types are known to be insecure, therefore - the default setting of this option is N. Support for these - encryption types is available only for compatibility with - legacy NFS client and server implementations. - - Removal of support is planned for a subsequent kernel - release. - config RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1 bool "Enable Kerberos enctypes based on AES and SHA-1" depends on RPCSEC_GSS_KRB5 @@ -73,7 +41,6 @@ config RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1 depends on CRYPTO_HMAC && CRYPTO_SHA1 depends on CRYPTO_AES default y - select RPCSEC_GSS_KRB5_CRYPTOSYSTEM help Choose Y to enable the use of Kerberos 5 encryption types that utilize Advanced Encryption Standard (AES) ciphers and @@ -86,7 +53,6 @@ config RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA depends on CRYPTO_CBC && CRYPTO_CTS && CRYPTO_CAMELLIA depends on CRYPTO_CMAC default n - select RPCSEC_GSS_KRB5_CRYPTOSYSTEM help Choose Y to enable the use of Kerberos 5 encryption types that utilize Camellia ciphers (RFC 3713) and CMAC digests @@ -100,7 +66,6 @@ config RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2 depends on CRYPTO_HMAC && CRYPTO_SHA256 && CRYPTO_SHA512 depends on CRYPTO_AES default n - select RPCSEC_GSS_KRB5_CRYPTOSYSTEM help Choose Y to enable the use of Kerberos 5 encryption types that utilize Advanced Encryption Standard (AES) ciphers and diff --git a/net/sunrpc/auth_gss/Makefile b/net/sunrpc/auth_gss/Makefile index 012ae1720689..ad1736d93b76 100644 --- a/net/sunrpc/auth_gss/Makefile +++ b/net/sunrpc/auth_gss/Makefile @@ -12,6 +12,6 @@ auth_rpcgss-y := auth_gss.o gss_generic_token.o \ obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o rpcsec_gss_krb5-y := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \ - gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o + gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o obj-$(CONFIG_RPCSEC_GSS_KRB5_KUNIT_TEST) += gss_krb5_test.o diff --git a/net/sunrpc/auth_gss/gss_krb5_internal.h b/net/sunrpc/auth_gss/gss_krb5_internal.h index b673e2626acb..3afd4065bf3d 100644 --- a/net/sunrpc/auth_gss/gss_krb5_internal.h +++ b/net/sunrpc/auth_gss/gss_krb5_internal.h @@ -33,7 +33,6 @@ struct gss_krb5_enctype { const u32 Ke_length; 
/* encryption subkey length, in octets */ const u32 Ki_length; /* integrity subkey length, in octets */ - int (*import_ctx)(struct krb5_ctx *ctx, gfp_t gfp_mask); int (*derive_key)(const struct gss_krb5_enctype *gk5e, const struct xdr_netobj *in, struct xdr_netobj *out, @@ -85,24 +84,15 @@ struct krb5_ctx { * GSS Kerberos 5 mechanism Per-Message calls. */ -u32 gss_krb5_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, - struct xdr_netobj *token); u32 gss_krb5_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text, struct xdr_netobj *token); -u32 gss_krb5_verify_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *message_buffer, - struct xdr_netobj *read_token); u32 gss_krb5_verify_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *message_buffer, struct xdr_netobj *read_token); -u32 gss_krb5_wrap_v1(struct krb5_ctx *kctx, int offset, - struct xdr_buf *buf, struct page **pages); u32 gss_krb5_wrap_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf, struct page **pages); -u32 gss_krb5_unwrap_v1(struct krb5_ctx *kctx, int offset, int len, - struct xdr_buf *buf, unsigned int *slack, - unsigned int *align); u32 gss_krb5_unwrap_v2(struct krb5_ctx *kctx, int offset, int len, struct xdr_buf *buf, unsigned int *slack, unsigned int *align); @@ -113,12 +103,6 @@ u32 gss_krb5_unwrap_v2(struct krb5_ctx *kctx, int offset, int len, /* Key Derivation Functions */ -int krb5_derive_key_v1(const struct gss_krb5_enctype *gk5e, - const struct xdr_netobj *inkey, - struct xdr_netobj *outkey, - const struct xdr_netobj *label, - gfp_t gfp_mask); - int krb5_derive_key_v2(const struct gss_krb5_enctype *gk5e, const struct xdr_netobj *inkey, struct xdr_netobj *outkey, @@ -169,13 +153,6 @@ static inline int krb5_derive_key(struct krb5_ctx *kctx, return gk5e->derive_key(gk5e, inkey, outkey, &label, gfp_mask); } -s32 krb5_make_seq_num(struct krb5_ctx *kctx, struct crypto_sync_skcipher *key, - int direction, u32 seqnum, unsigned char *cksum, - unsigned char *buf); - -s32 krb5_get_seq_num(struct krb5_ctx *kctx, unsigned char *cksum, - unsigned char *buf, int *direction, u32 *seqnum); - void krb5_make_confounder(u8 *p, int conflen); u32 make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen, diff --git a/net/sunrpc/auth_gss/gss_krb5_keys.c b/net/sunrpc/auth_gss/gss_krb5_keys.c index 5347fe1cc93f..06d8ee0db000 100644 --- a/net/sunrpc/auth_gss/gss_krb5_keys.c +++ b/net/sunrpc/auth_gss/gss_krb5_keys.c @@ -222,90 +222,6 @@ err_return: return ret; } -#define smask(step) ((1<<step)-1) -#define pstep(x, step) (((x)&smask(step))^(((x)>>step)&smask(step))) -#define parity_char(x) pstep(pstep(pstep((x), 4), 2), 1) - -static void mit_des_fixup_key_parity(u8 key[8]) -{ - int i; - for (i = 0; i < 8; i++) { - key[i] &= 0xfe; - key[i] |= 1^parity_char(key[i]); - } -} - -static int krb5_random_to_key_v1(const struct gss_krb5_enctype *gk5e, - struct xdr_netobj *randombits, - struct xdr_netobj *key) -{ - int i, ret = -EINVAL; - - if (key->len != 24) { - dprintk("%s: key->len is %d\n", __func__, key->len); - goto err_out; - } - if (randombits->len != 21) { - dprintk("%s: randombits->len is %d\n", - __func__, randombits->len); - goto err_out; - } - - /* take the seven bytes, move them around into the top 7 bits of the - 8 key bytes, then compute the parity bits. Do this three times. 
*/ - - for (i = 0; i < 3; i++) { - memcpy(key->data + i*8, randombits->data + i*7, 7); - key->data[i*8+7] = (((key->data[i*8]&1)<<1) | - ((key->data[i*8+1]&1)<<2) | - ((key->data[i*8+2]&1)<<3) | - ((key->data[i*8+3]&1)<<4) | - ((key->data[i*8+4]&1)<<5) | - ((key->data[i*8+5]&1)<<6) | - ((key->data[i*8+6]&1)<<7)); - - mit_des_fixup_key_parity(key->data + i*8); - } - ret = 0; -err_out: - return ret; -} - -/** - * krb5_derive_key_v1 - Derive a subkey for an RFC 3961 enctype - * @gk5e: Kerberos 5 enctype profile - * @inkey: base protocol key - * @outkey: OUT: derived key - * @label: subkey usage label - * @gfp_mask: memory allocation control flags - * - * Caller sets @outkey->len to the desired length of the derived key. - * - * On success, returns 0 and fills in @outkey. A negative errno value - * is returned on failure. - */ -int krb5_derive_key_v1(const struct gss_krb5_enctype *gk5e, - const struct xdr_netobj *inkey, - struct xdr_netobj *outkey, - const struct xdr_netobj *label, - gfp_t gfp_mask) -{ - struct xdr_netobj inblock; - int ret; - - inblock.len = gk5e->keybytes; - inblock.data = kmalloc(inblock.len, gfp_mask); - if (!inblock.data) - return -ENOMEM; - - ret = krb5_DK(gk5e, inkey, inblock.data, label, gfp_mask); - if (!ret) - ret = krb5_random_to_key_v1(gk5e, &inblock, outkey); - - kfree_sensitive(inblock.data); - return ret; -} - /* * This is the identity function, with some sanity checking. */ diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c index 20e21d08badb..e31cfdf7eadc 100644 --- a/net/sunrpc/auth_gss/gss_krb5_mech.c +++ b/net/sunrpc/auth_gss/gss_krb5_mech.c @@ -30,61 +30,7 @@ static struct gss_api_mech gss_kerberos_mech; -#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED) -static int gss_krb5_import_ctx_des(struct krb5_ctx *ctx, gfp_t gfp_mask); -static int gss_krb5_import_ctx_v1(struct krb5_ctx *ctx, gfp_t gfp_mask); -#endif -#if defined(CONFIG_RPCSEC_GSS_KRB5_CRYPTOSYSTEM) -static int gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask); -#endif - static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { -#if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_DES) - /* - * DES (All DES enctypes are mapped to the same gss functionality) - */ - { - .etype = ENCTYPE_DES_CBC_RAW, - .ctype = CKSUMTYPE_RSA_MD5, - .name = "des-cbc-crc", - .encrypt_name = "cbc(des)", - .cksum_name = "md5", - .import_ctx = gss_krb5_import_ctx_des, - .get_mic = gss_krb5_get_mic_v1, - .verify_mic = gss_krb5_verify_mic_v1, - .wrap = gss_krb5_wrap_v1, - .unwrap = gss_krb5_unwrap_v1, - .signalg = SGN_ALG_DES_MAC_MD5, - .sealalg = SEAL_ALG_DES, - .keybytes = 7, - .keylength = 8, - .cksumlength = 8, - .keyed_cksum = 0, - }, - /* - * 3DES - */ - { - .etype = ENCTYPE_DES3_CBC_RAW, - .ctype = CKSUMTYPE_HMAC_SHA1_DES3, - .name = "des3-hmac-sha1", - .encrypt_name = "cbc(des3_ede)", - .cksum_name = "hmac(sha1)", - .import_ctx = gss_krb5_import_ctx_v1, - .derive_key = krb5_derive_key_v1, - .get_mic = gss_krb5_get_mic_v1, - .verify_mic = gss_krb5_verify_mic_v1, - .wrap = gss_krb5_wrap_v1, - .unwrap = gss_krb5_unwrap_v1, - .signalg = SGN_ALG_HMAC_SHA1_DES3_KD, - .sealalg = SEAL_ALG_DES3KD, - .keybytes = 21, - .keylength = 24, - .cksumlength = 20, - .keyed_cksum = 1, - }, -#endif - #if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1) /* * AES-128 with SHA-1 (RFC 3962) @@ -96,7 +42,6 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .encrypt_name = "cts(cbc(aes))", .aux_cipher = "cbc(aes)", .cksum_name = "hmac(sha1)", - .import_ctx = 
gss_krb5_import_ctx_v2, .derive_key = krb5_derive_key_v2, .encrypt = gss_krb5_aes_encrypt, .decrypt = gss_krb5_aes_decrypt, @@ -126,7 +71,6 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .encrypt_name = "cts(cbc(aes))", .aux_cipher = "cbc(aes)", .cksum_name = "hmac(sha1)", - .import_ctx = gss_krb5_import_ctx_v2, .derive_key = krb5_derive_key_v2, .encrypt = gss_krb5_aes_encrypt, .decrypt = gss_krb5_aes_decrypt, @@ -166,7 +110,6 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .Ke_length = BITS2OCTETS(128), .Ki_length = BITS2OCTETS(128), - .import_ctx = gss_krb5_import_ctx_v2, .derive_key = krb5_kdf_feedback_cmac, .encrypt = gss_krb5_aes_encrypt, .decrypt = gss_krb5_aes_decrypt, @@ -193,7 +136,6 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .Ke_length = BITS2OCTETS(256), .Ki_length = BITS2OCTETS(256), - .import_ctx = gss_krb5_import_ctx_v2, .derive_key = krb5_kdf_feedback_cmac, .encrypt = gss_krb5_aes_encrypt, .decrypt = gss_krb5_aes_decrypt, @@ -223,7 +165,6 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .Ke_length = BITS2OCTETS(128), .Ki_length = BITS2OCTETS(128), - .import_ctx = gss_krb5_import_ctx_v2, .derive_key = krb5_kdf_hmac_sha2, .encrypt = krb5_etm_encrypt, .decrypt = krb5_etm_decrypt, @@ -250,7 +191,6 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = { .Ke_length = BITS2OCTETS(256), .Ki_length = BITS2OCTETS(192), - .import_ctx = gss_krb5_import_ctx_v2, .derive_key = krb5_kdf_hmac_sha2, .encrypt = krb5_etm_encrypt, .decrypt = krb5_etm_decrypt, @@ -284,12 +224,6 @@ static void gss_krb5_prepare_enctype_priority_list(void) ENCTYPE_AES256_CTS_HMAC_SHA1_96, ENCTYPE_AES128_CTS_HMAC_SHA1_96, #endif -#if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_DES) - ENCTYPE_DES3_CBC_SHA1, - ENCTYPE_DES_CBC_MD5, - ENCTYPE_DES_CBC_CRC, - ENCTYPE_DES_CBC_MD4, -#endif }; size_t total, i; char buf[16]; @@ -330,185 +264,6 @@ const struct gss_krb5_enctype *gss_krb5_lookup_enctype(u32 etype) EXPORT_SYMBOL_IF_KUNIT(gss_krb5_lookup_enctype); static struct crypto_sync_skcipher * -gss_krb5_alloc_cipher_v1(struct krb5_ctx *ctx, struct xdr_netobj *key) -{ - struct crypto_sync_skcipher *tfm; - - tfm = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0); - if (IS_ERR(tfm)) - return NULL; - if (crypto_sync_skcipher_setkey(tfm, key->data, key->len)) { - crypto_free_sync_skcipher(tfm); - return NULL; - } - return tfm; -} - -static inline const void * -get_key(const void *p, const void *end, - struct krb5_ctx *ctx, struct crypto_sync_skcipher **res) -{ - struct crypto_sync_skcipher *tfm; - struct xdr_netobj key; - int alg; - - p = simple_get_bytes(p, end, &alg, sizeof(alg)); - if (IS_ERR(p)) - goto out_err; - switch (alg) { - case ENCTYPE_DES_CBC_CRC: - case ENCTYPE_DES_CBC_MD4: - case ENCTYPE_DES_CBC_MD5: - /* Map all these key types to ENCTYPE_DES_CBC_RAW */ - alg = ENCTYPE_DES_CBC_RAW; - break; - } - if (!gss_krb5_lookup_enctype(alg)) { - pr_warn("gss_krb5: unsupported enctype: %d\n", alg); - goto out_err_inval; - } - - p = simple_get_netobj(p, end, &key); - if (IS_ERR(p)) - goto out_err; - tfm = gss_krb5_alloc_cipher_v1(ctx, &key); - kfree(key.data); - if (!tfm) { - pr_warn("gss_krb5: failed to initialize cipher '%s'\n", - ctx->gk5e->encrypt_name); - goto out_err_inval; - } - *res = tfm; - - return p; - -out_err_inval: - p = ERR_PTR(-EINVAL); -out_err: - return p; -} - -static int -gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx) -{ - u32 seq_send; - int tmp; - 
u32 time32; - - p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); - if (IS_ERR(p)) - goto out_err; - - /* Old format supports only DES! Any other enctype uses new format */ - ctx->enctype = ENCTYPE_DES_CBC_RAW; - - ctx->gk5e = gss_krb5_lookup_enctype(ctx->enctype); - if (ctx->gk5e == NULL) { - p = ERR_PTR(-EINVAL); - goto out_err; - } - - /* The downcall format was designed before we completely understood - * the uses of the context fields; so it includes some stuff we - * just give some minimal sanity-checking, and some we ignore - * completely (like the next twenty bytes): */ - if (unlikely(p + 20 > end || p + 20 < p)) { - p = ERR_PTR(-EFAULT); - goto out_err; - } - p += 20; - p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); - if (IS_ERR(p)) - goto out_err; - if (tmp != SGN_ALG_DES_MAC_MD5) { - p = ERR_PTR(-ENOSYS); - goto out_err; - } - p = simple_get_bytes(p, end, &tmp, sizeof(tmp)); - if (IS_ERR(p)) - goto out_err; - if (tmp != SEAL_ALG_DES) { - p = ERR_PTR(-ENOSYS); - goto out_err; - } - p = simple_get_bytes(p, end, &time32, sizeof(time32)); - if (IS_ERR(p)) - goto out_err; - /* unsigned 32-bit time overflows in year 2106 */ - ctx->endtime = (time64_t)time32; - p = simple_get_bytes(p, end, &seq_send, sizeof(seq_send)); - if (IS_ERR(p)) - goto out_err; - atomic_set(&ctx->seq_send, seq_send); - p = simple_get_netobj(p, end, &ctx->mech_used); - if (IS_ERR(p)) - goto out_err; - p = get_key(p, end, ctx, &ctx->enc); - if (IS_ERR(p)) - goto out_err_free_mech; - p = get_key(p, end, ctx, &ctx->seq); - if (IS_ERR(p)) - goto out_err_free_key1; - if (p != end) { - p = ERR_PTR(-EFAULT); - goto out_err_free_key2; - } - - return 0; - -out_err_free_key2: - crypto_free_sync_skcipher(ctx->seq); -out_err_free_key1: - crypto_free_sync_skcipher(ctx->enc); -out_err_free_mech: - kfree(ctx->mech_used.data); -out_err: - return PTR_ERR(p); -} - -#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED) -static int -gss_krb5_import_ctx_des(struct krb5_ctx *ctx, gfp_t gfp_mask) -{ - return -EINVAL; -} - -static int -gss_krb5_import_ctx_v1(struct krb5_ctx *ctx, gfp_t gfp_mask) -{ - struct xdr_netobj keyin, keyout; - - keyin.data = ctx->Ksess; - keyin.len = ctx->gk5e->keylength; - - ctx->seq = gss_krb5_alloc_cipher_v1(ctx, &keyin); - if (ctx->seq == NULL) - goto out_err; - ctx->enc = gss_krb5_alloc_cipher_v1(ctx, &keyin); - if (ctx->enc == NULL) - goto out_free_seq; - - /* derive cksum */ - keyout.data = ctx->cksum; - keyout.len = ctx->gk5e->keylength; - if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_SIGN, - KEY_USAGE_SEED_CHECKSUM, gfp_mask)) - goto out_free_enc; - - return 0; - -out_free_enc: - crypto_free_sync_skcipher(ctx->enc); -out_free_seq: - crypto_free_sync_skcipher(ctx->seq); -out_err: - return -EINVAL; -} -#endif - -#if defined(CONFIG_RPCSEC_GSS_KRB5_CRYPTOSYSTEM) - -static struct crypto_sync_skcipher * gss_krb5_alloc_cipher_v2(const char *cname, const struct xdr_netobj *key) { struct crypto_sync_skcipher *tfm; @@ -636,8 +391,6 @@ out_free: goto out; } -#endif - static int gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, gfp_t gfp_mask) @@ -671,9 +424,6 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype)); if (IS_ERR(p)) goto out_err; - /* Map ENCTYPE_DES3_CBC_SHA1 to ENCTYPE_DES3_CBC_RAW */ - if (ctx->enctype == ENCTYPE_DES3_CBC_SHA1) - ctx->enctype = ENCTYPE_DES3_CBC_RAW; ctx->gk5e = gss_krb5_lookup_enctype(ctx->enctype); if (ctx->gk5e == NULL) { 
dprintk("gss_kerberos_mech: unsupported krb5 enctype %u\n", @@ -700,7 +450,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, } ctx->mech_used.len = gss_kerberos_mech.gm_oid.len; - return ctx->gk5e->import_ctx(ctx, gfp_mask); + return gss_krb5_import_ctx_v2(ctx, gfp_mask); out_err: return PTR_ERR(p); @@ -718,10 +468,7 @@ gss_krb5_import_sec_context(const void *p, size_t len, struct gss_ctx *ctx_id, if (ctx == NULL) return -ENOMEM; - if (len == 85) - ret = gss_import_v1_context(p, end, ctx); - else - ret = gss_import_v2_context(p, end, ctx, gfp_mask); + ret = gss_import_v2_context(p, end, ctx, gfp_mask); memzero_explicit(&ctx->Ksess, sizeof(ctx->Ksess)); if (ret) { kfree(ctx); diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c index 146aa755f07d..ce540df9bce4 100644 --- a/net/sunrpc/auth_gss/gss_krb5_seal.c +++ b/net/sunrpc/auth_gss/gss_krb5_seal.c @@ -71,75 +71,6 @@ # define RPCDBG_FACILITY RPCDBG_AUTH #endif -#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED) - -static void * -setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token) -{ - u16 *ptr; - void *krb5_hdr; - int body_size = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength; - - token->len = g_token_size(&ctx->mech_used, body_size); - - ptr = (u16 *)token->data; - g_make_token_header(&ctx->mech_used, body_size, (unsigned char **)&ptr); - - /* ptr now at start of header described in rfc 1964, section 1.2.1: */ - krb5_hdr = ptr; - *ptr++ = KG_TOK_MIC_MSG; - /* - * signalg is stored as if it were converted from LE to host endian, even - * though it's an opaque pair of bytes according to the RFC. - */ - *ptr++ = (__force u16)cpu_to_le16(ctx->gk5e->signalg); - *ptr++ = SEAL_ALG_NONE; - *ptr = 0xffff; - - return krb5_hdr; -} - -u32 -gss_krb5_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, - struct xdr_netobj *token) -{ - char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; - struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), - .data = cksumdata}; - void *ptr; - time64_t now; - u32 seq_send; - u8 *cksumkey; - - dprintk("RPC: %s\n", __func__); - BUG_ON(ctx == NULL); - - now = ktime_get_real_seconds(); - - ptr = setup_token(ctx, token); - - if (ctx->gk5e->keyed_cksum) - cksumkey = ctx->cksum; - else - cksumkey = NULL; - - if (make_checksum(ctx, ptr, 8, text, 0, cksumkey, - KG_USAGE_SIGN, &md5cksum)) - return GSS_S_FAILURE; - - memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); - - seq_send = atomic_fetch_inc(&ctx->seq_send); - - if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff, - seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)) - return GSS_S_FAILURE; - - return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; -} - -#endif - static void * setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token) { diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c deleted file mode 100644 index 1babc3474e10..000000000000 --- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c +++ /dev/null @@ -1,106 +0,0 @@ -/* - * linux/net/sunrpc/gss_krb5_seqnum.c - * - * Adapted from MIT Kerberos 5-1.2.1 lib/gssapi/krb5/util_seqnum.c - * - * Copyright (c) 2000 The Regents of the University of Michigan. - * All rights reserved. - * - * Andy Adamson <andros@umich.edu> - */ - -/* - * Copyright 1993 by OpenVision Technologies, Inc. 
- * - * Permission to use, copy, modify, distribute, and sell this software - * and its documentation for any purpose is hereby granted without fee, - * provided that the above copyright notice appears in all copies and - * that both that copyright notice and this permission notice appear in - * supporting documentation, and that the name of OpenVision not be used - * in advertising or publicity pertaining to distribution of the software - * without specific, written prior permission. OpenVision makes no - * representations about the suitability of this software for any - * purpose. It is provided "as is" without express or implied warranty. - * - * OPENVISION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, - * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO - * EVENT SHALL OPENVISION BE LIABLE FOR ANY SPECIAL, INDIRECT OR - * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF - * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR - * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR - * PERFORMANCE OF THIS SOFTWARE. - */ - -#include <crypto/skcipher.h> -#include <linux/types.h> -#include <linux/sunrpc/gss_krb5.h> - -#include "gss_krb5_internal.h" - -#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) -# define RPCDBG_FACILITY RPCDBG_AUTH -#endif - -s32 -krb5_make_seq_num(struct krb5_ctx *kctx, - struct crypto_sync_skcipher *key, - int direction, - u32 seqnum, - unsigned char *cksum, unsigned char *buf) -{ - unsigned char *plain; - s32 code; - - plain = kmalloc(8, GFP_KERNEL); - if (!plain) - return -ENOMEM; - - plain[0] = (unsigned char) (seqnum & 0xff); - plain[1] = (unsigned char) ((seqnum >> 8) & 0xff); - plain[2] = (unsigned char) ((seqnum >> 16) & 0xff); - plain[3] = (unsigned char) ((seqnum >> 24) & 0xff); - - plain[4] = direction; - plain[5] = direction; - plain[6] = direction; - plain[7] = direction; - - code = krb5_encrypt(key, cksum, plain, buf, 8); - kfree(plain); - return code; -} - -s32 -krb5_get_seq_num(struct krb5_ctx *kctx, - unsigned char *cksum, - unsigned char *buf, - int *direction, u32 *seqnum) -{ - s32 code; - unsigned char *plain; - struct crypto_sync_skcipher *key = kctx->seq; - - dprintk("RPC: krb5_get_seq_num:\n"); - - plain = kmalloc(8, GFP_KERNEL); - if (!plain) - return -ENOMEM; - - if ((code = krb5_decrypt(key, cksum, buf, plain, 8))) - goto out; - - if ((plain[4] != plain[5]) || (plain[4] != plain[6]) || - (plain[4] != plain[7])) { - code = (s32)KG_BAD_SEQ; - goto out; - } - - *direction = plain[4]; - - *seqnum = ((plain[0]) | - (plain[1] << 8) | (plain[2] << 16) | (plain[3] << 24)); - -out: - kfree(plain); - return code; -} diff --git a/net/sunrpc/auth_gss/gss_krb5_test.c b/net/sunrpc/auth_gss/gss_krb5_test.c index 95ca783795c5..85625e3f3814 100644 --- a/net/sunrpc/auth_gss/gss_krb5_test.c +++ b/net/sunrpc/auth_gss/gss_krb5_test.c @@ -320,208 +320,12 @@ static void rfc3961_nfold_case(struct kunit *test) "result mismatch"); } -/* - * RFC 3961 Appendix A.3. DES3 DR and DK - * - * These tests show the derived-random and derived-key values for the - * des3-hmac-sha1-kd encryption scheme, using the DR and DK functions - * defined in section 6.3.1. The input keys were randomly generated; - * the usage values are from this specification. - * - * This test material is copyright (C) The Internet Society (2005). 
- */ - -DEFINE_HEX_XDR_NETOBJ(des3_dk_usage_155, - 0x00, 0x00, 0x00, 0x01, 0x55 -); - -DEFINE_HEX_XDR_NETOBJ(des3_dk_usage_1aa, - 0x00, 0x00, 0x00, 0x01, 0xaa -); - -DEFINE_HEX_XDR_NETOBJ(des3_dk_usage_kerberos, - 0x6b, 0x65, 0x72, 0x62, 0x65, 0x72, 0x6f, 0x73 -); - -DEFINE_HEX_XDR_NETOBJ(des3_dk_test1_base_key, - 0xdc, 0xe0, 0x6b, 0x1f, 0x64, 0xc8, 0x57, 0xa1, - 0x1c, 0x3d, 0xb5, 0x7c, 0x51, 0x89, 0x9b, 0x2c, - 0xc1, 0x79, 0x10, 0x08, 0xce, 0x97, 0x3b, 0x92 -); -DEFINE_HEX_XDR_NETOBJ(des3_dk_test1_derived_key, - 0x92, 0x51, 0x79, 0xd0, 0x45, 0x91, 0xa7, 0x9b, - 0x5d, 0x31, 0x92, 0xc4, 0xa7, 0xe9, 0xc2, 0x89, - 0xb0, 0x49, 0xc7, 0x1f, 0x6e, 0xe6, 0x04, 0xcd -); - -DEFINE_HEX_XDR_NETOBJ(des3_dk_test2_base_key, - 0x5e, 0x13, 0xd3, 0x1c, 0x70, 0xef, 0x76, 0x57, - 0x46, 0x57, 0x85, 0x31, 0xcb, 0x51, 0xc1, 0x5b, - 0xf1, 0x1c, 0xa8, 0x2c, 0x97, 0xce, 0xe9, 0xf2 -); -DEFINE_HEX_XDR_NETOBJ(des3_dk_test2_derived_key, - 0x9e, 0x58, 0xe5, 0xa1, 0x46, 0xd9, 0x94, 0x2a, - 0x10, 0x1c, 0x46, 0x98, 0x45, 0xd6, 0x7a, 0x20, - 0xe3, 0xc4, 0x25, 0x9e, 0xd9, 0x13, 0xf2, 0x07 -); - -DEFINE_HEX_XDR_NETOBJ(des3_dk_test3_base_key, - 0x98, 0xe6, 0xfd, 0x8a, 0x04, 0xa4, 0xb6, 0x85, - 0x9b, 0x75, 0xa1, 0x76, 0x54, 0x0b, 0x97, 0x52, - 0xba, 0xd3, 0xec, 0xd6, 0x10, 0xa2, 0x52, 0xbc -); -DEFINE_HEX_XDR_NETOBJ(des3_dk_test3_derived_key, - 0x13, 0xfe, 0xf8, 0x0d, 0x76, 0x3e, 0x94, 0xec, - 0x6d, 0x13, 0xfd, 0x2c, 0xa1, 0xd0, 0x85, 0x07, - 0x02, 0x49, 0xda, 0xd3, 0x98, 0x08, 0xea, 0xbf -); - -DEFINE_HEX_XDR_NETOBJ(des3_dk_test4_base_key, - 0x62, 0x2a, 0xec, 0x25, 0xa2, 0xfe, 0x2c, 0xad, - 0x70, 0x94, 0x68, 0x0b, 0x7c, 0x64, 0x94, 0x02, - 0x80, 0x08, 0x4c, 0x1a, 0x7c, 0xec, 0x92, 0xb5 -); -DEFINE_HEX_XDR_NETOBJ(des3_dk_test4_derived_key, - 0xf8, 0xdf, 0xbf, 0x04, 0xb0, 0x97, 0xe6, 0xd9, - 0xdc, 0x07, 0x02, 0x68, 0x6b, 0xcb, 0x34, 0x89, - 0xd9, 0x1f, 0xd9, 0xa4, 0x51, 0x6b, 0x70, 0x3e -); - -DEFINE_HEX_XDR_NETOBJ(des3_dk_test5_base_key, - 0xd3, 0xf8, 0x29, 0x8c, 0xcb, 0x16, 0x64, 0x38, - 0xdc, 0xb9, 0xb9, 0x3e, 0xe5, 0xa7, 0x62, 0x92, - 0x86, 0xa4, 0x91, 0xf8, 0x38, 0xf8, 0x02, 0xfb -); -DEFINE_HEX_XDR_NETOBJ(des3_dk_test5_derived_key, - 0x23, 0x70, 0xda, 0x57, 0x5d, 0x2a, 0x3d, 0xa8, - 0x64, 0xce, 0xbf, 0xdc, 0x52, 0x04, 0xd5, 0x6d, - 0xf7, 0x79, 0xa7, 0xdf, 0x43, 0xd9, 0xda, 0x43 -); - -DEFINE_HEX_XDR_NETOBJ(des3_dk_test6_base_key, - 0xc1, 0x08, 0x16, 0x49, 0xad, 0xa7, 0x43, 0x62, - 0xe6, 0xa1, 0x45, 0x9d, 0x01, 0xdf, 0xd3, 0x0d, - 0x67, 0xc2, 0x23, 0x4c, 0x94, 0x07, 0x04, 0xda -); -DEFINE_HEX_XDR_NETOBJ(des3_dk_test6_derived_key, - 0x34, 0x80, 0x57, 0xec, 0x98, 0xfd, 0xc4, 0x80, - 0x16, 0x16, 0x1c, 0x2a, 0x4c, 0x7a, 0x94, 0x3e, - 0x92, 0xae, 0x49, 0x2c, 0x98, 0x91, 0x75, 0xf7 -); - -DEFINE_HEX_XDR_NETOBJ(des3_dk_test7_base_key, - 0x5d, 0x15, 0x4a, 0xf2, 0x38, 0xf4, 0x67, 0x13, - 0x15, 0x57, 0x19, 0xd5, 0x5e, 0x2f, 0x1f, 0x79, - 0x0d, 0xd6, 0x61, 0xf2, 0x79, 0xa7, 0x91, 0x7c -); -DEFINE_HEX_XDR_NETOBJ(des3_dk_test7_derived_key, - 0xa8, 0x80, 0x8a, 0xc2, 0x67, 0xda, 0xda, 0x3d, - 0xcb, 0xe9, 0xa7, 0xc8, 0x46, 0x26, 0xfb, 0xc7, - 0x61, 0xc2, 0x94, 0xb0, 0x13, 0x15, 0xe5, 0xc1 -); - -DEFINE_HEX_XDR_NETOBJ(des3_dk_test8_base_key, - 0x79, 0x85, 0x62, 0xe0, 0x49, 0x85, 0x2f, 0x57, - 0xdc, 0x8c, 0x34, 0x3b, 0xa1, 0x7f, 0x2c, 0xa1, - 0xd9, 0x73, 0x94, 0xef, 0xc8, 0xad, 0xc4, 0x43 -); -DEFINE_HEX_XDR_NETOBJ(des3_dk_test8_derived_key, - 0xc8, 0x13, 0xf8, 0x8a, 0x3b, 0xe3, 0xb3, 0x34, - 0xf7, 0x54, 0x25, 0xce, 0x91, 0x75, 0xfb, 0xe3, - 0xc8, 0x49, 0x3b, 0x89, 0xc8, 0x70, 0x3b, 0x49 -); - -DEFINE_HEX_XDR_NETOBJ(des3_dk_test9_base_key, 
- 0x26, 0xdc, 0xe3, 0x34, 0xb5, 0x45, 0x29, 0x2f, - 0x2f, 0xea, 0xb9, 0xa8, 0x70, 0x1a, 0x89, 0xa4, - 0xb9, 0x9e, 0xb9, 0x94, 0x2c, 0xec, 0xd0, 0x16 -); -DEFINE_HEX_XDR_NETOBJ(des3_dk_test9_derived_key, - 0xf4, 0x8f, 0xfd, 0x6e, 0x83, 0xf8, 0x3e, 0x73, - 0x54, 0xe6, 0x94, 0xfd, 0x25, 0x2c, 0xf8, 0x3b, - 0xfe, 0x58, 0xf7, 0xd5, 0xba, 0x37, 0xec, 0x5d -); - -static const struct gss_krb5_test_param rfc3961_kdf_test_params[] = { - { - .desc = "des3-hmac-sha1 key derivation case 1", - .enctype = ENCTYPE_DES3_CBC_RAW, - .base_key = &des3_dk_test1_base_key, - .usage = &des3_dk_usage_155, - .expected_result = &des3_dk_test1_derived_key, - }, - { - .desc = "des3-hmac-sha1 key derivation case 2", - .enctype = ENCTYPE_DES3_CBC_RAW, - .base_key = &des3_dk_test2_base_key, - .usage = &des3_dk_usage_1aa, - .expected_result = &des3_dk_test2_derived_key, - }, - { - .desc = "des3-hmac-sha1 key derivation case 3", - .enctype = ENCTYPE_DES3_CBC_RAW, - .base_key = &des3_dk_test3_base_key, - .usage = &des3_dk_usage_155, - .expected_result = &des3_dk_test3_derived_key, - }, - { - .desc = "des3-hmac-sha1 key derivation case 4", - .enctype = ENCTYPE_DES3_CBC_RAW, - .base_key = &des3_dk_test4_base_key, - .usage = &des3_dk_usage_1aa, - .expected_result = &des3_dk_test4_derived_key, - }, - { - .desc = "des3-hmac-sha1 key derivation case 5", - .enctype = ENCTYPE_DES3_CBC_RAW, - .base_key = &des3_dk_test5_base_key, - .usage = &des3_dk_usage_kerberos, - .expected_result = &des3_dk_test5_derived_key, - }, - { - .desc = "des3-hmac-sha1 key derivation case 6", - .enctype = ENCTYPE_DES3_CBC_RAW, - .base_key = &des3_dk_test6_base_key, - .usage = &des3_dk_usage_155, - .expected_result = &des3_dk_test6_derived_key, - }, - { - .desc = "des3-hmac-sha1 key derivation case 7", - .enctype = ENCTYPE_DES3_CBC_RAW, - .base_key = &des3_dk_test7_base_key, - .usage = &des3_dk_usage_1aa, - .expected_result = &des3_dk_test7_derived_key, - }, - { - .desc = "des3-hmac-sha1 key derivation case 8", - .enctype = ENCTYPE_DES3_CBC_RAW, - .base_key = &des3_dk_test8_base_key, - .usage = &des3_dk_usage_155, - .expected_result = &des3_dk_test8_derived_key, - }, - { - .desc = "des3-hmac-sha1 key derivation case 9", - .enctype = ENCTYPE_DES3_CBC_RAW, - .base_key = &des3_dk_test9_base_key, - .usage = &des3_dk_usage_1aa, - .expected_result = &des3_dk_test9_derived_key, - }, -}; - -/* Creates the function rfc3961_kdf_gen_params */ -KUNIT_ARRAY_PARAM(rfc3961_kdf, rfc3961_kdf_test_params, gss_krb5_get_desc); - static struct kunit_case rfc3961_test_cases[] = { { .name = "RFC 3961 n-fold", .run_case = rfc3961_nfold_case, .generate_params = rfc3961_nfold_gen_params, }, - { - .name = "RFC 3961 key derivation", - .run_case = kdf_case, - .generate_params = rfc3961_kdf_gen_params, - }, {} }; diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c index 7d6d4ae4a3c9..4fbc50a0a2c4 100644 --- a/net/sunrpc/auth_gss/gss_krb5_unseal.c +++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c @@ -69,83 +69,6 @@ # define RPCDBG_FACILITY RPCDBG_AUTH #endif - -#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED) -/* read_token is a mic token, and message_buffer is the data that the mic was - * supposedly taken over. 
*/ -u32 -gss_krb5_verify_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *message_buffer, - struct xdr_netobj *read_token) -{ - int signalg; - int sealalg; - char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; - struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), - .data = cksumdata}; - s32 now; - int direction; - u32 seqnum; - unsigned char *ptr = (unsigned char *)read_token->data; - int bodysize; - u8 *cksumkey; - - dprintk("RPC: krb5_read_token\n"); - - if (g_verify_token_header(&ctx->mech_used, &bodysize, &ptr, - read_token->len)) - return GSS_S_DEFECTIVE_TOKEN; - - if ((ptr[0] != ((KG_TOK_MIC_MSG >> 8) & 0xff)) || - (ptr[1] != (KG_TOK_MIC_MSG & 0xff))) - return GSS_S_DEFECTIVE_TOKEN; - - /* XXX sanity-check bodysize?? */ - - signalg = ptr[2] + (ptr[3] << 8); - if (signalg != ctx->gk5e->signalg) - return GSS_S_DEFECTIVE_TOKEN; - - sealalg = ptr[4] + (ptr[5] << 8); - if (sealalg != SEAL_ALG_NONE) - return GSS_S_DEFECTIVE_TOKEN; - - if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) - return GSS_S_DEFECTIVE_TOKEN; - - if (ctx->gk5e->keyed_cksum) - cksumkey = ctx->cksum; - else - cksumkey = NULL; - - if (make_checksum(ctx, ptr, 8, message_buffer, 0, - cksumkey, KG_USAGE_SIGN, &md5cksum)) - return GSS_S_FAILURE; - - if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN, - ctx->gk5e->cksumlength)) - return GSS_S_BAD_SIG; - - /* it got through unscathed. Make sure the context is unexpired */ - - now = ktime_get_real_seconds(); - - if (now > ctx->endtime) - return GSS_S_CONTEXT_EXPIRED; - - /* do sequencing checks */ - - if (krb5_get_seq_num(ctx, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, - &direction, &seqnum)) - return GSS_S_FAILURE; - - if ((ctx->initiate && direction != 0xff) || - (!ctx->initiate && direction != 0)) - return GSS_S_BAD_SIG; - - return GSS_S_COMPLETE; -} -#endif - u32 gss_krb5_verify_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *message_buffer, struct xdr_netobj *read_token) diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c index 6d6b082380b2..b3e1738ff6bf 100644 --- a/net/sunrpc/auth_gss/gss_krb5_wrap.c +++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c @@ -40,293 +40,6 @@ # define RPCDBG_FACILITY RPCDBG_AUTH #endif -#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED) - -static inline int -gss_krb5_padding(int blocksize, int length) -{ - return blocksize - (length % blocksize); -} - -static inline void -gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize) -{ - int padding = gss_krb5_padding(blocksize, buf->len - offset); - char *p; - struct kvec *iov; - - if (buf->page_len || buf->tail[0].iov_len) - iov = &buf->tail[0]; - else - iov = &buf->head[0]; - p = iov->iov_base + iov->iov_len; - iov->iov_len += padding; - buf->len += padding; - memset(p, padding, padding); -} - -static inline int -gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize) -{ - u8 *ptr; - u8 pad; - size_t len = buf->len; - - if (len <= buf->head[0].iov_len) { - pad = *(u8 *)(buf->head[0].iov_base + len - 1); - if (pad > buf->head[0].iov_len) - return -EINVAL; - buf->head[0].iov_len -= pad; - goto out; - } else - len -= buf->head[0].iov_len; - if (len <= buf->page_len) { - unsigned int last = (buf->page_base + len - 1) - >>PAGE_SHIFT; - unsigned int offset = (buf->page_base + len - 1) - & (PAGE_SIZE - 1); - ptr = kmap_atomic(buf->pages[last]); - pad = *(ptr + offset); - kunmap_atomic(ptr); - goto out; - } else - len -= buf->page_len; - BUG_ON(len > buf->tail[0].iov_len); - pad = *(u8 *)(buf->tail[0].iov_base + len - 1); -out: - /* XXX: NOTE: we do not adjust the page lengths--they 
represent - * a range of data in the real filesystem page cache, and we need - * to know that range so the xdr code can properly place read data. - * However adjusting the head length, as we do above, is harmless. - * In the case of a request that fits into a single page, the server - * also uses length and head length together to determine the original - * start of the request to copy the request for deferal; so it's - * easier on the server if we adjust head and tail length in tandem. - * It's not really a problem that we don't fool with the page and - * tail lengths, though--at worst badly formed xdr might lead the - * server to attempt to parse the padding. - * XXX: Document all these weird requirements for gss mechanism - * wrap/unwrap functions. */ - if (pad > blocksize) - return -EINVAL; - if (buf->len > pad) - buf->len -= pad; - else - return -EINVAL; - return 0; -} - -/* Assumptions: the head and tail of inbuf are ours to play with. - * The pages, however, may be real pages in the page cache and we replace - * them with scratch pages from **pages before writing to them. */ -/* XXX: obviously the above should be documentation of wrap interface, - * and shouldn't be in this kerberos-specific file. */ - -/* XXX factor out common code with seal/unseal. */ - -u32 -gss_krb5_wrap_v1(struct krb5_ctx *kctx, int offset, - struct xdr_buf *buf, struct page **pages) -{ - char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; - struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), - .data = cksumdata}; - int blocksize = 0, plainlen; - unsigned char *ptr, *msg_start; - time64_t now; - int headlen; - struct page **tmp_pages; - u32 seq_send; - u8 *cksumkey; - u32 conflen = crypto_sync_skcipher_blocksize(kctx->enc); - - dprintk("RPC: %s\n", __func__); - - now = ktime_get_real_seconds(); - - blocksize = crypto_sync_skcipher_blocksize(kctx->enc); - gss_krb5_add_padding(buf, offset, blocksize); - BUG_ON((buf->len - offset) % blocksize); - plainlen = conflen + buf->len - offset; - - headlen = g_token_size(&kctx->mech_used, - GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) - - (buf->len - offset); - - ptr = buf->head[0].iov_base + offset; - /* shift data to make room for header. */ - xdr_extend_head(buf, offset, headlen); - - /* XXX Would be cleverer to encrypt while copying. */ - BUG_ON((buf->len - offset - headlen) % blocksize); - - g_make_token_header(&kctx->mech_used, - GSS_KRB5_TOK_HDR_LEN + - kctx->gk5e->cksumlength + plainlen, &ptr); - - - /* ptr now at header described in rfc 1964, section 1.2.1: */ - ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff); - ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff); - - msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength; - - /* - * signalg and sealalg are stored as if they were converted from LE - * to host endian, even though they're opaque pairs of bytes according - * to the RFC. 
- */ - *(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg); - *(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg); - ptr[6] = 0xff; - ptr[7] = 0xff; - - krb5_make_confounder(msg_start, conflen); - - if (kctx->gk5e->keyed_cksum) - cksumkey = kctx->cksum; - else - cksumkey = NULL; - - /* XXXJBF: UGH!: */ - tmp_pages = buf->pages; - buf->pages = pages; - if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen, - cksumkey, KG_USAGE_SEAL, &md5cksum)) - return GSS_S_FAILURE; - buf->pages = tmp_pages; - - memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len); - - seq_send = atomic_fetch_inc(&kctx->seq_send); - - /* XXX would probably be more efficient to compute checksum - * and encrypt at the same time: */ - if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff, - seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))) - return GSS_S_FAILURE; - - if (gss_encrypt_xdr_buf(kctx->enc, buf, - offset + headlen - conflen, pages)) - return GSS_S_FAILURE; - - return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; -} - -u32 -gss_krb5_unwrap_v1(struct krb5_ctx *kctx, int offset, int len, - struct xdr_buf *buf, unsigned int *slack, - unsigned int *align) -{ - int signalg; - int sealalg; - char cksumdata[GSS_KRB5_MAX_CKSUM_LEN]; - struct xdr_netobj md5cksum = {.len = sizeof(cksumdata), - .data = cksumdata}; - time64_t now; - int direction; - s32 seqnum; - unsigned char *ptr; - int bodysize; - void *data_start, *orig_start; - int data_len; - int blocksize; - u32 conflen = crypto_sync_skcipher_blocksize(kctx->enc); - int crypt_offset; - u8 *cksumkey; - unsigned int saved_len = buf->len; - - dprintk("RPC: gss_unwrap_kerberos\n"); - - ptr = (u8 *)buf->head[0].iov_base + offset; - if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr, - len - offset)) - return GSS_S_DEFECTIVE_TOKEN; - - if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) || - (ptr[1] != (KG_TOK_WRAP_MSG & 0xff))) - return GSS_S_DEFECTIVE_TOKEN; - - /* XXX sanity-check bodysize?? */ - - /* get the sign and seal algorithms */ - - signalg = ptr[2] + (ptr[3] << 8); - if (signalg != kctx->gk5e->signalg) - return GSS_S_DEFECTIVE_TOKEN; - - sealalg = ptr[4] + (ptr[5] << 8); - if (sealalg != kctx->gk5e->sealalg) - return GSS_S_DEFECTIVE_TOKEN; - - if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) - return GSS_S_DEFECTIVE_TOKEN; - - /* - * Data starts after token header and checksum. ptr points - * to the beginning of the token header - */ - crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) - - (unsigned char *)buf->head[0].iov_base; - - buf->len = len; - if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset)) - return GSS_S_DEFECTIVE_TOKEN; - - if (kctx->gk5e->keyed_cksum) - cksumkey = kctx->cksum; - else - cksumkey = NULL; - - if (make_checksum(kctx, ptr, 8, buf, crypt_offset, - cksumkey, KG_USAGE_SEAL, &md5cksum)) - return GSS_S_FAILURE; - - if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN, - kctx->gk5e->cksumlength)) - return GSS_S_BAD_SIG; - - /* it got through unscathed. Make sure the context is unexpired */ - - now = ktime_get_real_seconds(); - - if (now > kctx->endtime) - return GSS_S_CONTEXT_EXPIRED; - - /* do sequencing checks */ - - if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN, - ptr + 8, &direction, &seqnum)) - return GSS_S_BAD_SIG; - - if ((kctx->initiate && direction != 0xff) || - (!kctx->initiate && direction != 0)) - return GSS_S_BAD_SIG; - - /* Copy the data back to the right position. XXX: Would probably be - * better to copy and encrypt at the same time. 
*/ - - blocksize = crypto_sync_skcipher_blocksize(kctx->enc); - data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) + - conflen; - orig_start = buf->head[0].iov_base + offset; - data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; - memmove(orig_start, data_start, data_len); - buf->head[0].iov_len -= (data_start - orig_start); - buf->len = len - (data_start - orig_start); - - if (gss_krb5_remove_padding(buf, blocksize)) - return GSS_S_DEFECTIVE_TOKEN; - - /* slack must include room for krb5 padding */ - *slack = XDR_QUADLEN(saved_len - buf->len); - /* The GSS blob always precedes the RPC message payload */ - *align = *slack; - return GSS_S_COMPLETE; -} - -#endif - /* * We can shift data by up to LOCAL_BUF_LEN bytes in a pass. If we need * to do more than that, we shift repeatedly. Kevin Coffman reports diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index c4a566737085..18734e70c5dd 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -986,7 +986,7 @@ bad_unwrap: return -EINVAL; } -static int +static enum svc_auth_status svcauth_gss_set_client(struct svc_rqst *rqstp) { struct gss_svc_data *svcdata = rqstp->rq_auth_data; @@ -1634,7 +1634,7 @@ svcauth_gss_decode_credbody(struct xdr_stream *xdr, * * The rqstp->rq_auth_stat field is also set (see RFCs 2203 and 5531). */ -static int +static enum svc_auth_status svcauth_gss_accept(struct svc_rqst *rqstp) { struct gss_svc_data *svcdata = rqstp->rq_auth_data; @@ -1945,9 +1945,6 @@ bad_wrap: * %0: the Reply is ready to be sent * %-ENOMEM: failed to allocate memory * %-EINVAL: encoding error - * - * XXX: These return values do not match the return values documented - * for the auth_ops ->release method in linux/sunrpc/svcauth.h. */ static int svcauth_gss_release(struct svc_rqst *rqstp) diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c index 587811a002c9..dc21e6c732db 100644 --- a/net/sunrpc/svc.c +++ b/net/sunrpc/svc.c @@ -513,9 +513,9 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools, INIT_LIST_HEAD(&pool->sp_all_threads); spin_lock_init(&pool->sp_lock); + percpu_counter_init(&pool->sp_messages_arrived, 0, GFP_KERNEL); percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL); percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL); - percpu_counter_init(&pool->sp_threads_timedout, 0, GFP_KERNEL); } return serv; @@ -588,9 +588,9 @@ svc_destroy(struct kref *ref) for (i = 0; i < serv->sv_nrpools; i++) { struct svc_pool *pool = &serv->sv_pools[i]; + percpu_counter_destroy(&pool->sp_messages_arrived); percpu_counter_destroy(&pool->sp_sockets_queued); percpu_counter_destroy(&pool->sp_threads_woken); - percpu_counter_destroy(&pool->sp_threads_timedout); } kfree(serv->sv_pools); kfree(serv); @@ -689,23 +689,44 @@ svc_prepare_thread(struct svc_serv *serv, struct svc_pool *pool, int node) return rqstp; } -/* - * Choose a pool in which to create a new thread, for svc_set_num_threads +/** + * svc_pool_wake_idle_thread - Awaken an idle thread in @pool + * @pool: service thread pool + * + * Can be called from soft IRQ or process context. Finding an idle + * service thread and marking it BUSY is atomic with respect to + * other calls to svc_pool_wake_idle_thread(). 
+ * */ -static inline struct svc_pool * -choose_pool(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) +void svc_pool_wake_idle_thread(struct svc_pool *pool) { - if (pool != NULL) - return pool; + struct svc_rqst *rqstp; + + rcu_read_lock(); + list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) { + if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) + continue; + + WRITE_ONCE(rqstp->rq_qtime, ktime_get()); + wake_up_process(rqstp->rq_task); + rcu_read_unlock(); + percpu_counter_inc(&pool->sp_threads_woken); + trace_svc_wake_up(rqstp->rq_task->pid); + return; + } + rcu_read_unlock(); - return &serv->sv_pools[(*state)++ % serv->sv_nrpools]; + set_bit(SP_CONGESTED, &pool->sp_flags); } -/* - * Choose a thread to kill, for svc_set_num_threads - */ -static inline struct task_struct * -choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) +static struct svc_pool * +svc_pool_next(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) +{ + return pool ? pool : &serv->sv_pools[(*state)++ % serv->sv_nrpools]; +} + +static struct task_struct * +svc_pool_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) { unsigned int i; struct task_struct *task = NULL; @@ -713,7 +734,6 @@ choose_victim(struct svc_serv *serv, struct svc_pool *pool, unsigned int *state) if (pool != NULL) { spin_lock_bh(&pool->sp_lock); } else { - /* choose a pool in round-robin fashion */ for (i = 0; i < serv->sv_nrpools; i++) { pool = &serv->sv_pools[--(*state) % serv->sv_nrpools]; spin_lock_bh(&pool->sp_lock); @@ -728,21 +748,15 @@ found_pool: if (!list_empty(&pool->sp_all_threads)) { struct svc_rqst *rqstp; - /* - * Remove from the pool->sp_all_threads list - * so we don't try to kill it again. - */ rqstp = list_entry(pool->sp_all_threads.next, struct svc_rqst, rq_all); set_bit(RQ_VICTIM, &rqstp->rq_flags); list_del_rcu(&rqstp->rq_all); task = rqstp->rq_task; } spin_unlock_bh(&pool->sp_lock); - return task; } -/* create new threads */ static int svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) { @@ -754,13 +768,12 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) do { nrservs--; - chosen_pool = choose_pool(serv, pool, &state); - + chosen_pool = svc_pool_next(serv, pool, &state); node = svc_pool_map_get_node(chosen_pool->sp_id); + rqstp = svc_prepare_thread(serv, chosen_pool, node); if (IS_ERR(rqstp)) return PTR_ERR(rqstp); - task = kthread_create_on_node(serv->sv_threadfn, rqstp, node, "%s", serv->sv_name); if (IS_ERR(task)) { @@ -779,15 +792,6 @@ svc_start_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs) return 0; } -/* - * Create or destroy enough new threads to make the number - * of threads the given number. If `pool' is non-NULL, applies - * only to threads in that pool, otherwise round-robins between - * all pools. Caller must ensure that mutual exclusion between this and - * server startup or shutdown. 
- */
-
-/* destroy old threads */
 static int
 svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 {
@@ -795,9 +799,8 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 	struct task_struct *task;
 	unsigned int state = serv->sv_nrthreads-1;
 
-	/* destroy old threads */
 	do {
-		task = choose_victim(serv, pool, &state);
+		task = svc_pool_victim(serv, pool, &state);
 		if (task == NULL)
 			break;
 		rqstp = kthread_data(task);
@@ -809,6 +812,23 @@ svc_stop_kthreads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 	return 0;
 }
 
+/**
+ * svc_set_num_threads - adjust number of threads per RPC service
+ * @serv: RPC service to adjust
+ * @pool: Specific pool from which to choose threads, or NULL
+ * @nrservs: New number of threads for @serv (0 or less means kill all threads)
+ *
+ * Create or destroy threads to make the number of threads for @serv the
+ * given number. If @pool is non-NULL, change only threads in that pool;
+ * otherwise, round-robin between all pools for @serv. @serv's
+ * sv_nrthreads is adjusted for each thread created or destroyed.
+ *
+ * Caller must ensure mutual exclusion between this and server startup or
+ * shutdown.
+ *
+ * Returns zero on success or a negative errno if an error occurred while
+ * starting a thread.
+ */
 int
 svc_set_num_threads(struct svc_serv *serv, struct svc_pool *pool, int nrservs)
 {
@@ -1277,8 +1297,9 @@ svc_process_common(struct svc_rqst *rqstp)
 	const struct svc_procedure *procp = NULL;
 	struct svc_serv *serv = rqstp->rq_server;
 	struct svc_process_info process;
-	int auth_res, rc;
+	enum svc_auth_status auth_res;
 	unsigned int aoffset;
+	int rc;
 	__be32 *p;
 
 	/* Will be turned off by GSS integrity and privacy services */
@@ -1333,6 +1354,9 @@ svc_process_common(struct svc_rqst *rqstp)
 		goto dropit;
 	case SVC_COMPLETE:
 		goto sendit;
+	default:
+		pr_warn_once("Unexpected svc_auth_status (%d)\n", auth_res);
+		goto err_system_err;
 	}
 
 	if (progp == NULL)
@@ -1516,7 +1540,6 @@ out_baddir:
 out_drop:
 	svc_drop(rqstp);
 }
-EXPORT_SYMBOL_GPL(svc_process);
 
 #if defined(CONFIG_SUNRPC_BACKCHANNEL)
 /*
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 62c7919ea610..4cfe9640df48 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -434,6 +434,7 @@ static bool svc_xprt_ready(struct svc_xprt *xprt)
 	smp_rmb();
 	xpt_flags = READ_ONCE(xprt->xpt_flags);
 
+	trace_svc_xprt_enqueue(xprt, xpt_flags);
 	if (xpt_flags & BIT(XPT_BUSY))
 		return false;
 	if (xpt_flags & (BIT(XPT_CONN) | BIT(XPT_CLOSE) | BIT(XPT_HANDSHAKE)))
@@ -456,7 +457,6 @@ static bool svc_xprt_ready(struct svc_xprt *xprt)
 void svc_xprt_enqueue(struct svc_xprt *xprt)
 {
 	struct svc_pool *pool;
-	struct svc_rqst *rqstp = NULL;
 
 	if (!svc_xprt_ready(xprt))
 		return;
@@ -476,21 +476,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
 	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
 	spin_unlock_bh(&pool->sp_lock);
 
-	/* find a thread for this xprt */
-	rcu_read_lock();
-	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
-		if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
-			continue;
-		percpu_counter_inc(&pool->sp_threads_woken);
-		rqstp->rq_qtime = ktime_get();
-		wake_up_process(rqstp->rq_task);
-		goto out_unlock;
-	}
-	set_bit(SP_CONGESTED, &pool->sp_flags);
-	rqstp = NULL;
-out_unlock:
-	rcu_read_unlock();
-	trace_svc_xprt_enqueue(xprt, rqstp);
+	svc_pool_wake_idle_thread(pool);
 }
 EXPORT_SYMBOL_GPL(svc_xprt_enqueue);
 
@@ -581,7 +567,10 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
 	svc_xprt_put(xprt);
 }
 
-/*
+/**
+ * svc_wake_up - Wake up a service thread for non-transport work
+ * @serv: RPC service
+ *
  * Some svc_serv's will have occasional work to do, even when a xprt is not
  * waiting to be serviced. This function is there to "kick" a task in one of
  * those services so that it can wake up and do that work. Note that we only
@@ -590,27 +579,10 @@ static void svc_xprt_release(struct svc_rqst *rqstp)
  */
 void svc_wake_up(struct svc_serv *serv)
 {
-	struct svc_rqst *rqstp;
-	struct svc_pool *pool;
-
-	pool = &serv->sv_pools[0];
+	struct svc_pool *pool = &serv->sv_pools[0];
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
-		/* skip any that aren't queued */
-		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
-			continue;
-		rcu_read_unlock();
-		wake_up_process(rqstp->rq_task);
-		trace_svc_wake_up(rqstp->rq_task->pid);
-		return;
-	}
-	rcu_read_unlock();
-
-	/* No free entries available */
 	set_bit(SP_TASK_PENDING, &pool->sp_flags);
-	smp_wmb();
-	trace_svc_wake_up(0);
+	svc_pool_wake_idle_thread(pool);
 }
 EXPORT_SYMBOL_GPL(svc_wake_up);
 
@@ -679,7 +651,7 @@ static void svc_check_conn_limits(struct svc_serv *serv)
 	}
 }
 
-static int svc_alloc_arg(struct svc_rqst *rqstp)
+static bool svc_alloc_arg(struct svc_rqst *rqstp)
 {
 	struct svc_serv *serv = rqstp->rq_server;
 	struct xdr_buf *arg = &rqstp->rq_arg;
@@ -701,10 +673,10 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
 			/* Made progress, don't sleep yet */
 			continue;
 
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (signalled() || kthread_should_stop()) {
+		set_current_state(TASK_IDLE);
+		if (kthread_should_stop()) {
 			set_current_state(TASK_RUNNING);
-			return -EINTR;
+			return false;
 		}
 		trace_svc_alloc_arg_err(pages, ret);
 		memalloc_retry_wait(GFP_KERNEL);
@@ -723,7 +695,7 @@ static int svc_alloc_arg(struct svc_rqst *rqstp)
 	arg->tail[0].iov_len = 0;
 
 	rqstp->rq_xid = xdr_zero;
-	return 0;
+	return true;
 }
 
 static bool
@@ -732,7 +704,7 @@ rqst_should_sleep(struct svc_rqst *rqstp)
 	struct svc_pool *pool = rqstp->rq_pool;
 
 	/* did someone call svc_wake_up? */
-	if (test_and_clear_bit(SP_TASK_PENDING, &pool->sp_flags))
+	if (test_bit(SP_TASK_PENDING, &pool->sp_flags))
 		return false;
 
 	/* was a socket queued? */
@@ -740,7 +712,7 @@ rqst_should_sleep(struct svc_rqst *rqstp)
 		return false;
 
 	/* are we shutting down? */
-	if (signalled() || kthread_should_stop())
+	if (kthread_should_stop())
 		return false;
 
 	/* are we freezing? */
@@ -750,10 +722,9 @@ rqst_should_sleep(struct svc_rqst *rqstp)
 	return true;
 }
 
-static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
+static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp)
 {
 	struct svc_pool *pool = rqstp->rq_pool;
-	long time_left = 0;
 
 	/* rq_xprt should be clear on entry */
 	WARN_ON_ONCE(rqstp->rq_xprt);
@@ -762,18 +733,14 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 	if (rqstp->rq_xprt)
 		goto out_found;
 
-	/*
-	 * We have to be able to interrupt this wait
-	 * to bring down the daemons ...
-	 */
-	set_current_state(TASK_INTERRUPTIBLE);
+	set_current_state(TASK_IDLE);
 	smp_mb__before_atomic();
 	clear_bit(SP_CONGESTED, &pool->sp_flags);
 	clear_bit(RQ_BUSY, &rqstp->rq_flags);
 	smp_mb__after_atomic();
 
 	if (likely(rqst_should_sleep(rqstp)))
-		time_left = schedule_timeout(timeout);
+		schedule();
 	else
 		__set_current_state(TASK_RUNNING);
@@ -781,17 +748,16 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp)
 	set_bit(RQ_BUSY, &rqstp->rq_flags);
 	smp_mb__after_atomic();
+	clear_bit(SP_TASK_PENDING, &pool->sp_flags);
 	rqstp->rq_xprt = svc_xprt_dequeue(pool);
 	if (rqstp->rq_xprt)
 		goto out_found;
 
-	if (!time_left)
-		percpu_counter_inc(&pool->sp_threads_timedout);
-
-	if (signalled() || kthread_should_stop())
-		return ERR_PTR(-EINTR);
-	return ERR_PTR(-EAGAIN);
+	if (kthread_should_stop())
+		return NULL;
+	return NULL;
 out_found:
+	clear_bit(SP_TASK_PENDING, &pool->sp_flags);
 	/* Normally we will wait up to 5 seconds for any required
 	 * cache information to be provided.
 	 */
@@ -867,37 +833,35 @@ out:
 	return len;
 }
 
-/*
- * Receive the next request on any transport. This code is carefully
- * organised not to touch any cachelines in the shared svc_serv
- * structure, only cachelines in the local svc_pool.
+/**
+ * svc_recv - Receive and process the next request on any transport
+ * @rqstp: an idle RPC service thread
+ *
+ * This code is carefully organised not to touch any cachelines in
+ * the shared svc_serv structure, only cachelines in the local
+ * svc_pool.
  */
-int svc_recv(struct svc_rqst *rqstp, long timeout)
+void svc_recv(struct svc_rqst *rqstp)
 {
 	struct svc_xprt *xprt = NULL;
 	struct svc_serv *serv = rqstp->rq_server;
-	int len, err;
+	int len;
 
-	err = svc_alloc_arg(rqstp);
-	if (err)
+	if (!svc_alloc_arg(rqstp))
 		goto out;
 
 	try_to_freeze();
 	cond_resched();
-	err = -EINTR;
-	if (signalled() || kthread_should_stop())
+	if (kthread_should_stop())
 		goto out;
 
-	xprt = svc_get_next_xprt(rqstp, timeout);
-	if (IS_ERR(xprt)) {
-		err = PTR_ERR(xprt);
+	xprt = svc_get_next_xprt(rqstp);
+	if (!xprt)
 		goto out;
-	}
 
 	len = svc_handle_xprt(rqstp, xprt);
 
 	/* No data, incomplete (TCP) read, or accept() */
-	err = -EAGAIN;
 	if (len <= 0)
 		goto out_release;
@@ -909,13 +873,14 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
 
 	if (serv->sv_stats)
 		serv->sv_stats->netcnt++;
+	percpu_counter_inc(&rqstp->rq_pool->sp_messages_arrived);
 	rqstp->rq_stime = ktime_get();
-	return len;
+	svc_process(rqstp);
+out:
+	return;
 out_release:
 	rqstp->rq_res.len = 0;
 	svc_xprt_release(rqstp);
-out:
-	return err;
 }
 EXPORT_SYMBOL_GPL(svc_recv);
 
@@ -1456,12 +1421,11 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
 		return 0;
 	}
 
-	seq_printf(m, "%u %llu %llu %llu %llu\n",
-		pool->sp_id,
-		percpu_counter_sum_positive(&pool->sp_sockets_queued),
-		percpu_counter_sum_positive(&pool->sp_sockets_queued),
-		percpu_counter_sum_positive(&pool->sp_threads_woken),
-		percpu_counter_sum_positive(&pool->sp_threads_timedout));
+	seq_printf(m, "%u %llu %llu %llu 0\n",
+		   pool->sp_id,
+		   percpu_counter_sum_positive(&pool->sp_messages_arrived),
+		   percpu_counter_sum_positive(&pool->sp_sockets_queued),
+		   percpu_counter_sum_positive(&pool->sp_threads_woken));
 
 	return 0;
 }
diff --git a/net/sunrpc/svcauth.c b/net/sunrpc/svcauth.c
index 67d8245a08af..aa4429d0b810 100644
--- a/net/sunrpc/svcauth.c
+++ b/net/sunrpc/svcauth.c
@@ -60,8 +60,19 @@ svc_put_auth_ops(struct auth_ops *aops)
 	module_put(aops->owner);
 }
 
-int
-svc_authenticate(struct svc_rqst *rqstp)
+/**
+ * svc_authenticate - Initialize an outgoing credential
+ * @rqstp: RPC execution context
+ *
+ * Return values:
+ *   %SVC_OK: XDR encoding of the result can begin
+ *   %SVC_DENIED: Credential or verifier is not valid
+ *   %SVC_GARBAGE: Failed to decode credential or verifier
+ *   %SVC_COMPLETE: GSS context lifetime event; no further action
+ *   %SVC_DROP: Drop this request; no further action
+ *   %SVC_CLOSE: Like drop, but also close transport connection
+ */
+enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp)
 {
 	struct auth_ops *aops;
 	u32 flavor;
@@ -89,16 +100,28 @@ svc_authenticate(struct svc_rqst *rqstp)
 }
 EXPORT_SYMBOL_GPL(svc_authenticate);
 
-int svc_set_client(struct svc_rqst *rqstp)
+/**
+ * svc_set_client - Assign an appropriate 'auth_domain' as the client
+ * @rqstp: RPC execution context
+ *
+ * Return values:
+ *   %SVC_OK: Client was found and assigned
+ *   %SVC_DENY: Client was explicitly denied
+ *   %SVC_DROP: Ignore this request
+ *   %SVC_CLOSE: Ignore this request and close the connection
+ */
+enum svc_auth_status svc_set_client(struct svc_rqst *rqstp)
 {
 	rqstp->rq_client = NULL;
 	return rqstp->rq_authop->set_client(rqstp);
 }
 EXPORT_SYMBOL_GPL(svc_set_client);
 
-/* A request, which was authenticated, has now executed.
- * Time to finalise the credentials and verifier
- * and release and resources
+/**
+ * svc_authorise - Finalize credentials/verifier and release resources
+ * @rqstp: RPC execution context
+ *
+ * Returns zero on success, or a negative errno.
  */
 int svc_authorise(struct svc_rqst *rqstp)
 {
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 174783f804fa..04b45588ae6f 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -665,7 +665,7 @@ static struct group_info *unix_gid_find(kuid_t uid, struct svc_rqst *rqstp)
 	}
 }
 
-int
+enum svc_auth_status
 svcauth_unix_set_client(struct svc_rqst *rqstp)
 {
 	struct sockaddr_in *sin;
@@ -736,7 +736,6 @@ out:
 	rqstp->rq_auth_stat = rpc_auth_ok;
 	return SVC_OK;
 }
-
 EXPORT_SYMBOL_GPL(svcauth_unix_set_client);
 
 /**
@@ -751,7 +750,7 @@ EXPORT_SYMBOL_GPL(svcauth_unix_set_client);
  *
  *   rqstp->rq_auth_stat is set as mandated by RFC 5531.
  */
-static int
+static enum svc_auth_status
 svcauth_null_accept(struct svc_rqst *rqstp)
 {
 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
@@ -828,7 +827,7 @@ struct auth_ops svcauth_null = {
  *
  *   rqstp->rq_auth_stat is set as mandated by RFC 5531.
  */
-static int
+static enum svc_auth_status
 svcauth_tls_accept(struct svc_rqst *rqstp)
 {
 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
@@ -913,7 +912,7 @@ struct auth_ops svcauth_tls = {
  *
  *   rqstp->rq_auth_stat is set as mandated by RFC 5531.
  */
-static int
+static enum svc_auth_status
 svcauth_unix_accept(struct svc_rqst *rqstp)
 {
 	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 8c9a8ee76aa0..998687421fa6 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -36,6 +36,8 @@
 #include <linux/skbuff.h>
 #include <linux/file.h>
 #include <linux/freezer.h>
+#include <linux/bvec.h>
+
 #include <net/sock.h>
 #include <net/checksum.h>
 #include <net/ip.h>
@@ -695,9 +697,10 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
 		.msg_name	= &rqstp->rq_addr,
 		.msg_namelen	= rqstp->rq_addrlen,
 		.msg_control	= cmh,
+		.msg_flags	= MSG_SPLICE_PAGES,
 		.msg_controllen	= sizeof(buffer),
 	};
-	unsigned int sent;
+	unsigned int count;
 	int err;
 
 	svc_udp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
@@ -710,22 +713,23 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
 	if (svc_xprt_is_dead(xprt))
 		goto out_notconn;
 
-	err = xdr_alloc_bvec(xdr, GFP_KERNEL);
-	if (err < 0)
-		goto out_unlock;
+	count = xdr_buf_to_bvec(rqstp->rq_bvec,
+				ARRAY_SIZE(rqstp->rq_bvec), xdr);
 
-	err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+		      count, 0);
+	err = sock_sendmsg(svsk->sk_sock, &msg);
 	if (err == -ECONNREFUSED) {
 		/* ICMP error on earlier request. */
-		err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
+		iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+			      count, 0);
+		err = sock_sendmsg(svsk->sk_sock, &msg);
 	}
-	xdr_free_bvec(xdr);
+
 	trace_svcsock_udp_send(xprt, err);
-out_unlock:
+
 	mutex_unlock(&xprt->xpt_mutex);
-	if (err < 0)
-		return err;
-	return sent;
+	return err;
 
 out_notconn:
 	mutex_unlock(&xprt->xpt_mutex);
@@ -1089,6 +1093,9 @@ static void svc_tcp_fragment_received(struct svc_sock *svsk)
 	/* If we have more data, signal svc_xprt_enqueue() to try again */
 	svsk->sk_tcplen = 0;
 	svsk->sk_marker = xdr_zero;
+
+	smp_wmb();
+	tcp_set_rcvlowat(svsk->sk_sk, 1);
 }
 
 /**
@@ -1178,10 +1185,17 @@ err_incomplete:
 		goto err_delete;
 	if (len == want)
 		svc_tcp_fragment_received(svsk);
-	else
+	else {
+		/* Avoid more ->sk_data_ready() calls until the rest
+		 * of the message has arrived. This reduces service
+		 * thread wake-ups on large incoming messages. */
+		tcp_set_rcvlowat(svsk->sk_sk,
+				 svc_sock_reclen(svsk) - svsk->sk_tcplen);
+
 		trace_svcsock_tcp_recv_short(&svsk->sk_xprt,
 				svc_sock_reclen(svsk),
 				svsk->sk_tcplen - sizeof(rpc_fraghdr));
+	}
 	goto err_noclose;
 error:
 	if (len != -EAGAIN)
@@ -1198,75 +1212,51 @@ err_noclose:
 	return 0;	/* record not complete */
 }
 
-static int svc_tcp_send_kvec(struct socket *sock, const struct kvec *vec,
-			      int flags)
-{
-	struct msghdr msg = { .msg_flags = MSG_SPLICE_PAGES | flags, };
-
-	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 1, vec->iov_len);
-	return sock_sendmsg(sock, &msg);
-}
-
 /*
  * MSG_SPLICE_PAGES is used exclusively to reduce the number of
  * copy operations in this path. Therefore the caller must ensure
  * that the pages backing @xdr are unchanging.
  *
- * In addition, the logic assumes that * .bv_len is never larger
- * than PAGE_SIZE.
+ * Note that the send is non-blocking. The caller has incremented
+ * the reference count on each page backing the RPC message, and
+ * the network layer will "put" these pages when transmission is
+ * complete.
+ *
+ * This is safe for our RPC services because the memory backing
+ * the head and tail components is never kmalloc'd. These always
+ * come from pages in the svc_rqst::rq_pages array.
  */
-static int svc_tcp_sendmsg(struct socket *sock, struct xdr_buf *xdr,
+static int svc_tcp_sendmsg(struct svc_sock *svsk, struct svc_rqst *rqstp,
 			   rpc_fraghdr marker, unsigned int *sentp)
 {
-	const struct kvec *head = xdr->head;
-	const struct kvec *tail = xdr->tail;
-	struct kvec rm = {
-		.iov_base	= &marker,
-		.iov_len	= sizeof(marker),
-	};
 	struct msghdr msg = {
-		.msg_flags	= 0,
+		.msg_flags	= MSG_SPLICE_PAGES,
 	};
+	unsigned int count;
+	void *buf;
 	int ret;
 
 	*sentp = 0;
-	ret = xdr_alloc_bvec(xdr, GFP_KERNEL);
-	if (ret < 0)
-		return ret;
-
-	ret = kernel_sendmsg(sock, &msg, &rm, 1, rm.iov_len);
-	if (ret < 0)
-		return ret;
-	*sentp += ret;
-	if (ret != rm.iov_len)
-		return -EAGAIN;
-
-	ret = svc_tcp_send_kvec(sock, head, 0);
-	if (ret < 0)
-		return ret;
-	*sentp += ret;
-	if (ret != head->iov_len)
-		goto out;
-	if (xdr_buf_pagecount(xdr))
-		xdr->bvec[0].bv_offset = offset_in_page(xdr->page_base);
-
-	msg.msg_flags = MSG_SPLICE_PAGES;
-	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, xdr->bvec,
-		      xdr_buf_pagecount(xdr), xdr->page_len);
-	ret = sock_sendmsg(sock, &msg);
+	/* The stream record marker is copied into a temporary page
+	 * fragment buffer so that it can be included in rq_bvec.
+	 */
+	buf = page_frag_alloc(&svsk->sk_frag_cache, sizeof(marker),
+			      GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	memcpy(buf, &marker, sizeof(marker));
+	bvec_set_virt(rqstp->rq_bvec, buf, sizeof(marker));
+
+	count = xdr_buf_to_bvec(rqstp->rq_bvec + 1,
+				ARRAY_SIZE(rqstp->rq_bvec) - 1, &rqstp->rq_res);
+
+	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
+		      1 + count, sizeof(marker) + rqstp->rq_res.len);
+	ret = sock_sendmsg(svsk->sk_sock, &msg);
 	if (ret < 0)
 		return ret;
 	*sentp += ret;
-
-	if (tail->iov_len) {
-		ret = svc_tcp_send_kvec(sock, tail, 0);
-		if (ret < 0)
-			return ret;
-		*sentp += ret;
-	}
-
-out:
 	return 0;
 }
 
@@ -1292,23 +1282,17 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
 	svc_tcp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
 	rqstp->rq_xprt_ctxt = NULL;
 
-	atomic_inc(&svsk->sk_sendqlen);
 	mutex_lock(&xprt->xpt_mutex);
 	if (svc_xprt_is_dead(xprt))
 		goto out_notconn;
-	tcp_sock_set_cork(svsk->sk_sk, true);
-	err = svc_tcp_sendmsg(svsk->sk_sock, xdr, marker, &sent);
-	xdr_free_bvec(xdr);
+	err = svc_tcp_sendmsg(svsk, rqstp, marker, &sent);
 	trace_svcsock_tcp_send(xprt, err < 0 ? (long)err : sent);
 	if (err < 0 || sent != (xdr->len + sizeof(marker)))
 		goto out_close;
-	if (atomic_dec_and_test(&svsk->sk_sendqlen))
-		tcp_sock_set_cork(svsk->sk_sk, false);
 	mutex_unlock(&xprt->xpt_mutex);
 	return sent;
 out_notconn:
-	atomic_dec(&svsk->sk_sendqlen);
 	mutex_unlock(&xprt->xpt_mutex);
 	return -ENOTCONN;
 out_close:
@@ -1317,7 +1301,6 @@ out_close:
 		  (err < 0) ? "got error" : "sent",
 		  (err < 0) ? err : sent, xdr->len);
 	svc_xprt_deferred_close(xprt);
-	atomic_dec(&svsk->sk_sendqlen);
 	mutex_unlock(&xprt->xpt_mutex);
 	return -EAGAIN;
 }
@@ -1644,6 +1627,7 @@ static void svc_tcp_sock_detach(struct svc_xprt *xprt)
 static void svc_sock_free(struct svc_xprt *xprt)
 {
 	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
+	struct page_frag_cache *pfc = &svsk->sk_frag_cache;
 	struct socket *sock = svsk->sk_sock;
 
 	trace_svcsock_free(svsk, sock);
@@ -1653,5 +1637,8 @@ static void svc_sock_free(struct svc_xprt *xprt)
 		sockfd_put(sock);
 	else
 		sock_release(sock);
+	if (pfc->va)
+		__page_frag_cache_drain(virt_to_head_page(pfc->va),
+					pfc->pagecnt_bias);
 	kfree(svsk);
 }
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 2a22e78af116..358e6de91775 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -165,6 +165,56 @@ xdr_free_bvec(struct xdr_buf *buf)
 }
 
 /**
+ * xdr_buf_to_bvec - Copy components of an xdr_buf into a bio_vec array
+ * @bvec: bio_vec array to populate
+ * @bvec_size: element count of @bio_vec
+ * @xdr: xdr_buf to be copied
+ *
+ * Returns the number of entries consumed in @bvec.
+ */
+unsigned int xdr_buf_to_bvec(struct bio_vec *bvec, unsigned int bvec_size,
+			     const struct xdr_buf *xdr)
+{
+	const struct kvec *head = xdr->head;
+	const struct kvec *tail = xdr->tail;
+	unsigned int count = 0;
+
+	if (head->iov_len) {
+		bvec_set_virt(bvec++, head->iov_base, head->iov_len);
+		++count;
+	}
+
+	if (xdr->page_len) {
+		unsigned int offset, len, remaining;
+		struct page **pages = xdr->pages;
+
+		offset = offset_in_page(xdr->page_base);
+		remaining = xdr->page_len;
+		while (remaining > 0) {
+			len = min_t(unsigned int, remaining,
+				    PAGE_SIZE - offset);
+			bvec_set_page(bvec++, *pages++, len, offset);
+			remaining -= len;
+			offset = 0;
+			if (unlikely(++count > bvec_size))
+				goto bvec_overflow;
+		}
+	}
+
+	if (tail->iov_len) {
+		bvec_set_virt(bvec, tail->iov_base, tail->iov_len);
+		if (unlikely(++count > bvec_size))
+			goto bvec_overflow;
+	}
+
+	return count;
+
+bvec_overflow:
+	pr_warn_once("%s: bio_vec array overflow\n", __func__);
+	return count - 1;
+}
+
+/**
  * xdr_inline_pages - Prepare receive buffer for a large reply
  * @xdr: xdr_buf into which reply will be placed
  * @offset: expected offset where data payload will start, in bytes
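The following is an illustrative sketch only, not part of the patch above. It mirrors the send-path pattern these changes introduce: flatten an xdr_buf into a bio_vec array with the new xdr_buf_to_bvec() helper, then hand the whole RPC message to the socket layer in a single sendmsg() call with MSG_SPLICE_PAGES. The function name example_send_xdr_buf, the EXAMPLE_MAX_BVECS bound, and the on-stack bio_vec array are assumptions made for this example; the real transports use the per-request svc_rqst::rq_bvec array instead.

#include <linux/kernel.h>
#include <linux/bvec.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/uio.h>
#include <linux/sunrpc/xdr.h>

/* Assumed bound for this sketch only. */
#define EXAMPLE_MAX_BVECS	16

static int example_send_xdr_buf(struct socket *sock, const struct xdr_buf *xdr)
{
	struct bio_vec bvec[EXAMPLE_MAX_BVECS];
	struct msghdr msg = {
		/* Zero-copy send: the pages backing @xdr must remain
		 * unchanged until the network layer has released them.
		 */
		.msg_flags = MSG_SPLICE_PAGES,
	};
	unsigned int count;

	/* Flatten head, page array, and tail into one bio_vec array. */
	count = xdr_buf_to_bvec(bvec, ARRAY_SIZE(bvec), xdr);

	/* Transmit the entire message with a single sendmsg() call. */
	iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, bvec, count, xdr->len);
	return sock_sendmsg(sock, &msg);
}

In the UDP and TCP paths above, the same idea appears with rqstp->rq_bvec in place of the local array; the TCP path additionally prepends the stream record marker as the first bio_vec entry before sending.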