NFSD 6.3 Release Notes


Merge tag 'nfsd-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux

Pull nfsd updates from Chuck Lever:
 "Two significant security enhancements are part of this release:

   - NFSD's RPC header encoding and decoding, including RPCSEC GSS and
     gssproxy header parsing, has been overhauled to make it more
     memory-safe.

   - Support for Kerberos AES-SHA2-based encryption types has been added
     for both the NFS client and server. This provides a clean path for
     deprecating and removing insecure encryption types based on DES and
     SHA-1. AES-SHA2 is also FIPS-140 compliant, so that NFS with
  Kerberos may now be used on systems with FIPS enabled.

  In addition to these, NFSD is now able to handle crossing into an
  auto-mounted mount point on an exported NFS mount. A number of fixes
  have been made to NFSD's server-side copy implementation.

  RPC metrics have been converted to per-CPU variables. This helps
  reduce unnecessary cross-CPU and cross-node memory bus traffic, and
  significantly reduces noise when KCSAN is enabled"
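
For context on the metrics item: the conversion pattern used throughout this merge
replaces one shared counter with a per-CPU slot that only its owning CPU writes,
summing the slots when a total is needed. A minimal sketch of the pattern (names
are illustrative, not the exact NFSD code):

    #include <linux/percpu.h>
    #include <linux/cpumask.h>

    static DEFINE_PER_CPU_ALIGNED(unsigned long, frob_count);

    static void frob_hot_path(void)
    {
            this_cpu_inc(frob_count);       /* no lock, no shared cache line */
    }

    static unsigned long frob_count_total(void)
    {
            unsigned long sum = 0;
            int cpu;

            for_each_possible_cpu(cpu)
                    sum += per_cpu(frob_count, cpu);
            return sum;
    }

Since each slot has a single writer, KCSAN no longer reports the unsynchronized
read-modify-write it would flag on a shared counter.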

* tag 'nfsd-6.3' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux: (121 commits)
  NFSD: Clean up nfsd_symlink()
  NFSD: copy the whole verifier in nfsd_copy_write_verifier
  nfsd: don't fsync nfsd_files on last close
  SUNRPC: Fix occasional warning when destroying gss_krb5_enctypes
  nfsd: fix courtesy client with deny mode handling in nfs4_upgrade_open
  NFSD: fix problems with cleanup on errors in nfsd4_copy
  nfsd: fix race to check ls_layouts
  nfsd: don't hand out delegation on setuid files being opened for write
  SUNRPC: Remove ->xpo_secure_port()
  SUNRPC: Clean up the svc_xprt_flags() macro
  nfsd: remove fs/nfsd/fault_inject.c
  NFSD: fix leaked reference count of nfsd4_ssc_umount_item
  nfsd: clean up potential nfsd_file refcount leaks in COPY codepath
  nfsd: zero out pointers after putting nfsd_files on COPY setup error
  SUNRPC: Fix whitespace damage in svcauth_unix.c
  nfsd: eliminate __nfs4_get_fd
  nfsd: add some kerneldoc comments for stateid preprocessing functions
  nfsd: eliminate find_deleg_file_locked
  nfsd: don't take nfsd4_copy ref for OP_OFFLOAD_STATUS
  SUNRPC: Add encryption self-tests
  ...
Linus Torvalds 2023-02-22 14:21:40 -08:00
commit 9fc2f99030
56 changed files with 5234 additions and 2086 deletions

View File

@ -685,17 +685,16 @@ module_exit(exit_nlm);
/**
* nlmsvc_dispatch - Process an NLM Request
* @rqstp: incoming request
* @statp: pointer to location of accept_stat field in RPC Reply buffer
*
* Return values:
* %0: Processing complete; do not send a Reply
* %1: Processing complete; send Reply in rqstp->rq_res
*/
static int nlmsvc_dispatch(struct svc_rqst *rqstp, __be32 *statp)
static int nlmsvc_dispatch(struct svc_rqst *rqstp)
{
const struct svc_procedure *procp = rqstp->rq_procinfo;
__be32 *statp = rqstp->rq_accept_statp;
svcxdr_init_decode(rqstp);
if (!procp->pc_decode(rqstp, &rqstp->rq_arg_stream))
goto out_decode_err;
@ -705,7 +704,6 @@ static int nlmsvc_dispatch(struct svc_rqst *rqstp, __be32 *statp)
if (*statp != rpc_success)
return 1;
svcxdr_init_encode(rqstp);
if (!procp->pc_encode(rqstp, &rqstp->rq_res_stream))
goto out_encode_err;
@ -723,7 +721,7 @@ out_encode_err:
/*
* Define NLM program and procedures
*/
static unsigned int nlmsvc_version1_count[17];
static DEFINE_PER_CPU_ALIGNED(unsigned long, nlmsvc_version1_count[17]);
static const struct svc_version nlmsvc_version1 = {
.vs_vers = 1,
.vs_nproc = 17,
@ -732,26 +730,31 @@ static const struct svc_version nlmsvc_version1 = {
.vs_dispatch = nlmsvc_dispatch,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
static unsigned int nlmsvc_version3_count[24];
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nlmsvc_version3_count[ARRAY_SIZE(nlmsvc_procedures)]);
static const struct svc_version nlmsvc_version3 = {
.vs_vers = 3,
.vs_nproc = 24,
.vs_nproc = ARRAY_SIZE(nlmsvc_procedures),
.vs_proc = nlmsvc_procedures,
.vs_count = nlmsvc_version3_count,
.vs_dispatch = nlmsvc_dispatch,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
#ifdef CONFIG_LOCKD_V4
static unsigned int nlmsvc_version4_count[24];
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nlmsvc_version4_count[ARRAY_SIZE(nlmsvc_procedures4)]);
static const struct svc_version nlmsvc_version4 = {
.vs_vers = 4,
.vs_nproc = 24,
.vs_nproc = ARRAY_SIZE(nlmsvc_procedures4),
.vs_proc = nlmsvc_procedures4,
.vs_count = nlmsvc_version4_count,
.vs_dispatch = nlmsvc_dispatch,
.vs_xdrsize = NLMSVC_XDRSIZE,
};
#endif
static const struct svc_version *nlmsvc_version[] = {
[1] = &nlmsvc_version1,
[3] = &nlmsvc_version3,
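
The nlmsvc_dispatch() hunks above illustrate the new dispatcher contract: generic
SUNRPC code now initializes the argument and reply xdr_streams and records the
accept_stat location in rqstp->rq_accept_statp before calling ->vs_dispatch, so
the old __be32 *statp parameter disappears. Under that contract a minimal
dispatcher (an illustrative sketch, minus tracepoints) reduces to:

    static int example_dispatch(struct svc_rqst *rqstp)
    {
            const struct svc_procedure *procp = rqstp->rq_procinfo;
            __be32 *statp = rqstp->rq_accept_statp;

            if (!procp->pc_decode(rqstp, &rqstp->rq_arg_stream)) {
                    *statp = rpc_garbage_args;      /* malformed arguments */
                    return 1;
            }
            *statp = procp->pc_func(rqstp);
            if (*statp != rpc_success)
                    return 1;                       /* reply carries the error */
            if (!procp->pc_encode(rqstp, &rqstp->rq_res_stream)) {
                    *statp = rpc_system_err;        /* reply would not fit */
                    return 1;
            }
            return 1;
    }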

View File

@ -1459,11 +1459,11 @@ EXPORT_SYMBOL(follow_down_one);
* point, the filesystem owning that dentry may be queried as to whether the
* caller is permitted to proceed or not.
*/
int follow_down(struct path *path)
int follow_down(struct path *path, unsigned int flags)
{
struct vfsmount *mnt = path->mnt;
bool jumped;
int ret = traverse_mounts(path, &jumped, NULL, 0);
int ret = traverse_mounts(path, &jumped, NULL, flags);
if (path->mnt != mnt)
mntput(mnt);
@ -2865,7 +2865,7 @@ int path_pts(struct path *path)
path->dentry = child;
dput(parent);
follow_down(path);
follow_down(path, 0);
return 0;
}
#endif
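
The new flags argument to follow_down() is how LOOKUP_AUTOMOUNT reaches
traverse_mounts(): passing it makes the descent trigger an automount at the
mount point, while existing callers such as path_pts() pass 0 and keep the old
behavior. The NFSD caller in the fs/nfsd hunk further down uses it roughly
like this:

    unsigned int flags = 0;

    if (exp->ex_flags & NFSEXP_CROSSMOUNT)
            flags = LOOKUP_AUTOMOUNT;       /* allow crossing into automounts */
    err = follow_down(&path, flags);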

View File

@ -980,14 +980,11 @@ out_invalidcred:
}
static int
nfs_callback_dispatch(struct svc_rqst *rqstp, __be32 *statp)
nfs_callback_dispatch(struct svc_rqst *rqstp)
{
const struct svc_procedure *procp = rqstp->rq_procinfo;
svcxdr_init_decode(rqstp);
svcxdr_init_encode(rqstp);
*statp = procp->pc_func(rqstp);
*rqstp->rq_accept_statp = procp->pc_func(rqstp);
return 1;
}
@ -1072,7 +1069,8 @@ static const struct svc_procedure nfs4_callback_procedures1[] = {
}
};
static unsigned int nfs4_callback_count1[ARRAY_SIZE(nfs4_callback_procedures1)];
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nfs4_callback_count1[ARRAY_SIZE(nfs4_callback_procedures1)]);
const struct svc_version nfs4_callback_version1 = {
.vs_vers = 1,
.vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),
@ -1084,7 +1082,8 @@ const struct svc_version nfs4_callback_version1 = {
.vs_need_cong_ctrl = true,
};
static unsigned int nfs4_callback_count4[ARRAY_SIZE(nfs4_callback_procedures1)];
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nfs4_callback_count4[ARRAY_SIZE(nfs4_callback_procedures1)]);
const struct svc_version nfs4_callback_version4 = {
.vs_vers = 4,
.vs_nproc = ARRAY_SIZE(nfs4_callback_procedures1),

View File

@ -42,7 +42,7 @@ nfs_encode_fh(struct inode *inode, __u32 *p, int *max_len, struct inode *parent)
dprintk("%s: max fh len %d inode %p parent %p",
__func__, *max_len, inode, parent);
if (*max_len < len || IS_AUTOMOUNT(inode)) {
if (*max_len < len) {
dprintk("%s: fh len %d too small, required %d\n",
__func__, *max_len, len);
*max_len = len;

View File

@ -1,142 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2011 Bryan Schumaker <bjschuma@netapp.com>
*
* Uses debugfs to create fault injection points for client testing
*/
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/nsproxy.h>
#include <linux/sunrpc/addr.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include "state.h"
#include "netns.h"
struct nfsd_fault_inject_op {
char *file;
u64 (*get)(void);
u64 (*set_val)(u64);
u64 (*set_clnt)(struct sockaddr_storage *, size_t);
};
static struct dentry *debug_dir;
static ssize_t fault_inject_read(struct file *file, char __user *buf,
size_t len, loff_t *ppos)
{
static u64 val;
char read_buf[25];
size_t size;
loff_t pos = *ppos;
struct nfsd_fault_inject_op *op = file_inode(file)->i_private;
if (!pos)
val = op->get();
size = scnprintf(read_buf, sizeof(read_buf), "%llu\n", val);
return simple_read_from_buffer(buf, len, ppos, read_buf, size);
}
static ssize_t fault_inject_write(struct file *file, const char __user *buf,
size_t len, loff_t *ppos)
{
char write_buf[INET6_ADDRSTRLEN];
size_t size = min(sizeof(write_buf) - 1, len);
struct net *net = current->nsproxy->net_ns;
struct sockaddr_storage sa;
struct nfsd_fault_inject_op *op = file_inode(file)->i_private;
u64 val;
char *nl;
if (copy_from_user(write_buf, buf, size))
return -EFAULT;
write_buf[size] = '\0';
/* Deal with any embedded newlines in the string */
nl = strchr(write_buf, '\n');
if (nl) {
size = nl - write_buf;
*nl = '\0';
}
size = rpc_pton(net, write_buf, size, (struct sockaddr *)&sa, sizeof(sa));
if (size > 0) {
val = op->set_clnt(&sa, size);
if (val)
pr_info("NFSD [%s]: Client %s had %llu state object(s)\n",
op->file, write_buf, val);
} else {
val = simple_strtoll(write_buf, NULL, 0);
if (val == 0)
pr_info("NFSD Fault Injection: %s (all)", op->file);
else
pr_info("NFSD Fault Injection: %s (n = %llu)",
op->file, val);
val = op->set_val(val);
pr_info("NFSD: %s: found %llu", op->file, val);
}
return len; /* on success, claim we got the whole input */
}
static const struct file_operations fops_nfsd = {
.owner = THIS_MODULE,
.read = fault_inject_read,
.write = fault_inject_write,
};
void nfsd_fault_inject_cleanup(void)
{
debugfs_remove_recursive(debug_dir);
}
static struct nfsd_fault_inject_op inject_ops[] = {
{
.file = "forget_clients",
.get = nfsd_inject_print_clients,
.set_val = nfsd_inject_forget_clients,
.set_clnt = nfsd_inject_forget_client,
},
{
.file = "forget_locks",
.get = nfsd_inject_print_locks,
.set_val = nfsd_inject_forget_locks,
.set_clnt = nfsd_inject_forget_client_locks,
},
{
.file = "forget_openowners",
.get = nfsd_inject_print_openowners,
.set_val = nfsd_inject_forget_openowners,
.set_clnt = nfsd_inject_forget_client_openowners,
},
{
.file = "forget_delegations",
.get = nfsd_inject_print_delegations,
.set_val = nfsd_inject_forget_delegations,
.set_clnt = nfsd_inject_forget_client_delegations,
},
{
.file = "recall_delegations",
.get = nfsd_inject_print_delegations,
.set_val = nfsd_inject_recall_delegations,
.set_clnt = nfsd_inject_recall_client_delegations,
},
};
void nfsd_fault_inject_init(void)
{
unsigned int i;
struct nfsd_fault_inject_op *op;
umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
debug_dir = debugfs_create_dir("nfsd", NULL);
for (i = 0; i < ARRAY_SIZE(inject_ops); i++) {
op = &inject_ops[i];
debugfs_create_file(op->file, mode, debug_dir, op, &fops_nfsd);
}
}

View File

@ -331,37 +331,27 @@ nfsd_file_alloc(struct nfsd_file_lookup_key *key, unsigned int may)
return nf;
}
/**
* nfsd_file_check_write_error - check for writeback errors on a file
* @nf: nfsd_file to check for writeback errors
*
* Check whether a nfsd_file has an unseen error. Reset the write
* verifier if so.
*/
static void
nfsd_file_fsync(struct nfsd_file *nf)
{
struct file *file = nf->nf_file;
int ret;
if (!file || !(file->f_mode & FMODE_WRITE))
return;
ret = vfs_fsync(file, 1);
trace_nfsd_file_fsync(nf, ret);
if (ret)
nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
}
static int
nfsd_file_check_write_error(struct nfsd_file *nf)
{
struct file *file = nf->nf_file;
if (!file || !(file->f_mode & FMODE_WRITE))
return 0;
return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
if ((file->f_mode & FMODE_WRITE) &&
filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err)))
nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
}
static void
nfsd_file_hash_remove(struct nfsd_file *nf)
{
trace_nfsd_file_unhash(nf);
if (nfsd_file_check_write_error(nf))
nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
rhashtable_remove_fast(&nfsd_file_rhash_tbl, &nf->nf_rhash,
nfsd_file_rhash_params);
}
@ -387,23 +377,12 @@ nfsd_file_free(struct nfsd_file *nf)
this_cpu_add(nfsd_file_total_age, age);
nfsd_file_unhash(nf);
/*
* We call fsync here in order to catch writeback errors. It's not
* strictly required by the protocol, but an nfsd_file could get
* evicted from the cache before a COMMIT comes in. If another
* task were to open that file in the interim and scrape the error,
* then the client may never see it. By calling fsync here, we ensure
* that writeback happens before the entry is freed, and that any
* errors reported result in the write verifier changing.
*/
nfsd_file_fsync(nf);
if (nf->nf_mark)
nfsd_file_mark_put(nf->nf_mark);
if (nf->nf_file) {
get_file(nf->nf_file);
filp_close(nf->nf_file, NULL);
nfsd_file_check_write_error(nf);
fput(nf->nf_file);
}
@ -452,7 +431,7 @@ static bool nfsd_file_lru_remove(struct nfsd_file *nf)
struct nfsd_file *
nfsd_file_get(struct nfsd_file *nf)
{
if (likely(refcount_inc_not_zero(&nf->nf_ref)))
if (nf && refcount_inc_not_zero(&nf->nf_ref))
return nf;
return NULL;
}
@ -1107,8 +1086,7 @@ retry:
rcu_read_lock();
nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
nfsd_file_rhash_params);
if (nf)
nf = nfsd_file_get(nf);
nf = nfsd_file_get(nf);
rcu_read_unlock();
if (nf) {
@ -1159,6 +1137,7 @@ wait_for_construction:
out:
if (status == nfs_ok) {
this_cpu_inc(nfsd_file_acquisitions);
nfsd_file_check_write_error(nf);
*pnf = nf;
} else {
if (refcount_dec_and_test(&nf->nf_ref))
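
Two of the filecache hunks combine into a single idiom: nfsd_file_get() now
tolerates a NULL argument, so an RCU hash lookup can feed its result directly
into the refcount bump, and one NULL test covers both "not found" and "lost a
race with the final put":

    rcu_read_lock();
    nf = rhashtable_lookup(&nfsd_file_rhash_tbl, &key,
                           nfsd_file_rhash_params);
    nf = nfsd_file_get(nf);         /* NULL in, NULL out; NULL if nf_ref hit 0 */
    rcu_read_unlock();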

View File

@ -377,10 +377,11 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
},
};
static unsigned int nfsd_acl_count2[ARRAY_SIZE(nfsd_acl_procedures2)];
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nfsd_acl_count2[ARRAY_SIZE(nfsd_acl_procedures2)]);
const struct svc_version nfsd_acl_version2 = {
.vs_vers = 2,
.vs_nproc = 5,
.vs_nproc = ARRAY_SIZE(nfsd_acl_procedures2),
.vs_proc = nfsd_acl_procedures2,
.vs_count = nfsd_acl_count2,
.vs_dispatch = nfsd_dispatch,

View File

@ -266,10 +266,11 @@ static const struct svc_procedure nfsd_acl_procedures3[3] = {
},
};
static unsigned int nfsd_acl_count3[ARRAY_SIZE(nfsd_acl_procedures3)];
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nfsd_acl_count3[ARRAY_SIZE(nfsd_acl_procedures3)]);
const struct svc_version nfsd_acl_version3 = {
.vs_vers = 3,
.vs_nproc = 3,
.vs_nproc = ARRAY_SIZE(nfsd_acl_procedures3),
.vs_proc = nfsd_acl_procedures3,
.vs_count = nfsd_acl_count3,
.vs_dispatch = nfsd_dispatch,

View File

@ -1064,10 +1064,11 @@ static const struct svc_procedure nfsd_procedures3[22] = {
},
};
static unsigned int nfsd_count3[ARRAY_SIZE(nfsd_procedures3)];
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nfsd_count3[ARRAY_SIZE(nfsd_procedures3)]);
const struct svc_version nfsd_version3 = {
.vs_vers = 3,
.vs_nproc = 22,
.vs_nproc = ARRAY_SIZE(nfsd_procedures3),
.vs_proc = nfsd_procedures3,
.vs_dispatch = nfsd_dispatch,
.vs_count = nfsd_count3,

View File

@ -323,11 +323,11 @@ nfsd4_recall_file_layout(struct nfs4_layout_stateid *ls)
if (ls->ls_recalled)
goto out_unlock;
ls->ls_recalled = true;
atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
if (list_empty(&ls->ls_layouts))
goto out_unlock;
ls->ls_recalled = true;
atomic_inc(&ls->ls_stid.sc_file->fi_lo_recalls);
trace_nfsd_layout_recall(&ls->ls_stid.sc_stateid);
refcount_inc(&ls->ls_stid.sc_count);

View File

@ -1214,8 +1214,10 @@ out:
return status;
out_put_dst:
nfsd_file_put(*dst);
*dst = NULL;
out_put_src:
nfsd_file_put(*src);
*src = NULL;
goto out;
}
@ -1293,15 +1295,15 @@ extern void nfs_sb_deactive(struct super_block *sb);
* setup a work entry in the ssc delayed unmount list.
*/
static __be32 nfsd4_ssc_setup_dul(struct nfsd_net *nn, char *ipaddr,
struct nfsd4_ssc_umount_item **retwork, struct vfsmount **ss_mnt)
struct nfsd4_ssc_umount_item **nsui)
{
struct nfsd4_ssc_umount_item *ni = NULL;
struct nfsd4_ssc_umount_item *work = NULL;
struct nfsd4_ssc_umount_item *tmp;
DEFINE_WAIT(wait);
__be32 status = 0;
*ss_mnt = NULL;
*retwork = NULL;
*nsui = NULL;
work = kzalloc(sizeof(*work), GFP_KERNEL);
try_again:
spin_lock(&nn->nfsd_ssc_lock);
@ -1325,12 +1327,12 @@ try_again:
finish_wait(&nn->nfsd_ssc_waitq, &wait);
goto try_again;
}
*ss_mnt = ni->nsui_vfsmount;
*nsui = ni;
refcount_inc(&ni->nsui_refcnt);
spin_unlock(&nn->nfsd_ssc_lock);
kfree(work);
/* return vfsmount in ss_mnt */
/* return vfsmount in (*nsui)->nsui_vfsmount */
return 0;
}
if (work) {
@ -1338,31 +1340,32 @@ try_again:
refcount_set(&work->nsui_refcnt, 2);
work->nsui_busy = true;
list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list);
*retwork = work;
}
*nsui = work;
} else
status = nfserr_resource;
spin_unlock(&nn->nfsd_ssc_lock);
return 0;
return status;
}
static void nfsd4_ssc_update_dul_work(struct nfsd_net *nn,
struct nfsd4_ssc_umount_item *work, struct vfsmount *ss_mnt)
static void nfsd4_ssc_update_dul(struct nfsd_net *nn,
struct nfsd4_ssc_umount_item *nsui,
struct vfsmount *ss_mnt)
{
/* set nsui_vfsmount, clear busy flag and wakeup waiters */
spin_lock(&nn->nfsd_ssc_lock);
work->nsui_vfsmount = ss_mnt;
work->nsui_busy = false;
nsui->nsui_vfsmount = ss_mnt;
nsui->nsui_busy = false;
wake_up_all(&nn->nfsd_ssc_waitq);
spin_unlock(&nn->nfsd_ssc_lock);
}
static void nfsd4_ssc_cancel_dul_work(struct nfsd_net *nn,
struct nfsd4_ssc_umount_item *work)
static void nfsd4_ssc_cancel_dul(struct nfsd_net *nn,
struct nfsd4_ssc_umount_item *nsui)
{
spin_lock(&nn->nfsd_ssc_lock);
list_del(&work->nsui_list);
list_del(&nsui->nsui_list);
wake_up_all(&nn->nfsd_ssc_waitq);
spin_unlock(&nn->nfsd_ssc_lock);
kfree(work);
kfree(nsui);
}
/*
@ -1370,7 +1373,7 @@ static void nfsd4_ssc_cancel_dul_work(struct nfsd_net *nn,
*/
static __be32
nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
struct vfsmount **mount)
struct nfsd4_ssc_umount_item **nsui)
{
struct file_system_type *type;
struct vfsmount *ss_mnt;
@ -1381,7 +1384,6 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
char *ipaddr, *dev_name, *raw_data;
int len, raw_len;
__be32 status = nfserr_inval;
struct nfsd4_ssc_umount_item *work = NULL;
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
naddr = &nss->u.nl4_addr;
@ -1389,6 +1391,7 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
naddr->addr_len,
(struct sockaddr *)&tmp_addr,
sizeof(tmp_addr));
*nsui = NULL;
if (tmp_addrlen == 0)
goto out_err;
@ -1431,10 +1434,10 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
goto out_free_rawdata;
snprintf(dev_name, len + 5, "%s%s%s:/", startsep, ipaddr, endsep);
status = nfsd4_ssc_setup_dul(nn, ipaddr, &work, &ss_mnt);
status = nfsd4_ssc_setup_dul(nn, ipaddr, nsui);
if (status)
goto out_free_devname;
if (ss_mnt)
if ((*nsui)->nsui_vfsmount)
goto out_done;
/* Use an 'internal' mount: SB_KERNMOUNT -> MNT_INTERNAL */
@ -1442,15 +1445,12 @@ nfsd4_interssc_connect(struct nl4_server *nss, struct svc_rqst *rqstp,
module_put(type->owner);
if (IS_ERR(ss_mnt)) {
status = nfserr_nodev;
if (work)
nfsd4_ssc_cancel_dul_work(nn, work);
nfsd4_ssc_cancel_dul(nn, *nsui);
goto out_free_devname;
}
if (work)
nfsd4_ssc_update_dul_work(nn, work, ss_mnt);
nfsd4_ssc_update_dul(nn, *nsui, ss_mnt);
out_done:
status = 0;
*mount = ss_mnt;
out_free_devname:
kfree(dev_name);
@ -1474,7 +1474,7 @@ out_err:
static __be32
nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_copy *copy, struct vfsmount **mount)
struct nfsd4_copy *copy)
{
struct svc_fh *s_fh = NULL;
stateid_t *s_stid = &copy->cp_src_stateid;
@ -1487,7 +1487,7 @@ nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
if (status)
goto out;
status = nfsd4_interssc_connect(copy->cp_src, rqstp, mount);
status = nfsd4_interssc_connect(copy->cp_src, rqstp, &copy->ss_nsui);
if (status)
goto out;
@ -1505,45 +1505,26 @@ out:
}
static void
nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct file *filp,
nfsd4_cleanup_inter_ssc(struct nfsd4_ssc_umount_item *nsui, struct file *filp,
struct nfsd_file *dst)
{
bool found = false;
long timeout;
struct nfsd4_ssc_umount_item *tmp;
struct nfsd4_ssc_umount_item *ni = NULL;
struct nfsd_net *nn = net_generic(dst->nf_net, nfsd_net_id);
long timeout = msecs_to_jiffies(nfsd4_ssc_umount_timeout);
nfs42_ssc_close(filp);
nfsd_file_put(dst);
fput(filp);
if (!nn) {
mntput(ss_mnt);
return;
}
spin_lock(&nn->nfsd_ssc_lock);
timeout = msecs_to_jiffies(nfsd4_ssc_umount_timeout);
list_for_each_entry_safe(ni, tmp, &nn->nfsd_ssc_mount_list, nsui_list) {
if (ni->nsui_vfsmount->mnt_sb == ss_mnt->mnt_sb) {
list_del(&ni->nsui_list);
/*
* vfsmount can be shared by multiple exports,
* decrement refcnt. If the count drops to 1 it
* will be unmounted when nsui_expire expires.
*/
refcount_dec(&ni->nsui_refcnt);
ni->nsui_expire = jiffies + timeout;
list_add_tail(&ni->nsui_list, &nn->nfsd_ssc_mount_list);
found = true;
break;
}
}
list_del(&nsui->nsui_list);
/*
* vfsmount can be shared by multiple exports,
* decrement refcnt. If the count drops to 1 it
* will be unmounted when nsui_expire expires.
*/
refcount_dec(&nsui->nsui_refcnt);
nsui->nsui_expire = jiffies + timeout;
list_add_tail(&nsui->nsui_list, &nn->nfsd_ssc_mount_list);
spin_unlock(&nn->nfsd_ssc_lock);
if (!found) {
mntput(ss_mnt);
return;
}
}
#else /* CONFIG_NFSD_V4_2_INTER_SSC */
@ -1551,15 +1532,13 @@ nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct file *filp,
static __be32
nfsd4_setup_inter_ssc(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
struct nfsd4_copy *copy,
struct vfsmount **mount)
struct nfsd4_copy *copy)
{
*mount = NULL;
return nfserr_inval;
}
static void
nfsd4_cleanup_inter_ssc(struct vfsmount *ss_mnt, struct file *filp,
nfsd4_cleanup_inter_ssc(struct nfsd4_ssc_umount_item *nsui, struct file *filp,
struct nfsd_file *dst)
{
}
@ -1582,13 +1561,6 @@ nfsd4_setup_intra_ssc(struct svc_rqst *rqstp,
&copy->nf_dst);
}
static void
nfsd4_cleanup_intra_ssc(struct nfsd_file *src, struct nfsd_file *dst)
{
nfsd_file_put(src);
nfsd_file_put(dst);
}
static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
{
struct nfsd4_cb_offload *cbo =
@ -1700,18 +1672,27 @@ static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
memcpy(dst->cp_src, src->cp_src, sizeof(struct nl4_server));
memcpy(&dst->stateid, &src->stateid, sizeof(src->stateid));
memcpy(&dst->c_fh, &src->c_fh, sizeof(src->c_fh));
dst->ss_mnt = src->ss_mnt;
dst->ss_nsui = src->ss_nsui;
}
static void release_copy_files(struct nfsd4_copy *copy)
{
if (copy->nf_src)
nfsd_file_put(copy->nf_src);
if (copy->nf_dst)
nfsd_file_put(copy->nf_dst);
}
static void cleanup_async_copy(struct nfsd4_copy *copy)
{
nfs4_free_copy_state(copy);
nfsd_file_put(copy->nf_dst);
if (!nfsd4_ssc_is_inter(copy))
nfsd_file_put(copy->nf_src);
spin_lock(&copy->cp_clp->async_lock);
list_del(&copy->copies);
spin_unlock(&copy->cp_clp->async_lock);
release_copy_files(copy);
if (copy->cp_clp) {
spin_lock(&copy->cp_clp->async_lock);
if (!list_empty(&copy->copies))
list_del_init(&copy->copies);
spin_unlock(&copy->cp_clp->async_lock);
}
nfs4_put_copy(copy);
}
@ -1749,8 +1730,8 @@ static int nfsd4_do_async_copy(void *data)
if (nfsd4_ssc_is_inter(copy)) {
struct file *filp;
filp = nfs42_ssc_open(copy->ss_mnt, &copy->c_fh,
&copy->stateid);
filp = nfs42_ssc_open(copy->ss_nsui->nsui_vfsmount,
&copy->c_fh, &copy->stateid);
if (IS_ERR(filp)) {
switch (PTR_ERR(filp)) {
case -EBADF:
@ -1764,11 +1745,10 @@ static int nfsd4_do_async_copy(void *data)
}
nfserr = nfsd4_do_copy(copy, filp, copy->nf_dst->nf_file,
false);
nfsd4_cleanup_inter_ssc(copy->ss_mnt, filp, copy->nf_dst);
nfsd4_cleanup_inter_ssc(copy->ss_nsui, filp, copy->nf_dst);
} else {
nfserr = nfsd4_do_copy(copy, copy->nf_src->nf_file,
copy->nf_dst->nf_file, false);
nfsd4_cleanup_intra_ssc(copy->nf_src, copy->nf_dst);
}
do_callback:
@ -1790,8 +1770,7 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
status = nfserr_notsupp;
goto out;
}
status = nfsd4_setup_inter_ssc(rqstp, cstate, copy,
&copy->ss_mnt);
status = nfsd4_setup_inter_ssc(rqstp, cstate, copy);
if (status)
return nfserr_offload_denied;
} else {
@ -1810,12 +1789,13 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
if (!async_copy)
goto out_err;
INIT_LIST_HEAD(&async_copy->copies);
refcount_set(&async_copy->refcount, 1);
async_copy->cp_src = kmalloc(sizeof(*async_copy->cp_src), GFP_KERNEL);
if (!async_copy->cp_src)
goto out_err;
if (!nfs4_init_copy_state(nn, copy))
goto out_err;
refcount_set(&async_copy->refcount, 1);
memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid.cs_stid,
sizeof(copy->cp_res.cb_stateid));
dup_copy_fields(copy, async_copy);
@ -1832,36 +1812,51 @@ nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
} else {
status = nfsd4_do_copy(copy, copy->nf_src->nf_file,
copy->nf_dst->nf_file, true);
nfsd4_cleanup_intra_ssc(copy->nf_src, copy->nf_dst);
}
out:
release_copy_files(copy);
return status;
out_err:
if (nfsd4_ssc_is_inter(copy)) {
/*
* Source's vfsmount of inter-copy will be unmounted
* by the laundromat. Use copy instead of async_copy
* since async_copy->ss_nsui might not be set yet.
*/
refcount_dec(&copy->ss_nsui->nsui_refcnt);
}
if (async_copy)
cleanup_async_copy(async_copy);
status = nfserrno(-ENOMEM);
/*
* source's vfsmount of inter-copy will be unmounted
* by the laundromat
*/
goto out;
}
struct nfsd4_copy *
static struct nfsd4_copy *
find_async_copy_locked(struct nfs4_client *clp, stateid_t *stateid)
{
struct nfsd4_copy *copy;
lockdep_assert_held(&clp->async_lock);
list_for_each_entry(copy, &clp->async_copies, copies) {
if (memcmp(&copy->cp_stateid.cs_stid, stateid, NFS4_STATEID_SIZE))
continue;
return copy;
}
return NULL;
}
static struct nfsd4_copy *
find_async_copy(struct nfs4_client *clp, stateid_t *stateid)
{
struct nfsd4_copy *copy;
spin_lock(&clp->async_lock);
list_for_each_entry(copy, &clp->async_copies, copies) {
if (memcmp(&copy->cp_stateid.cs_stid, stateid, NFS4_STATEID_SIZE))
continue;
copy = find_async_copy_locked(clp, stateid);
if (copy)
refcount_inc(&copy->refcount);
spin_unlock(&clp->async_lock);
return copy;
}
spin_unlock(&clp->async_lock);
return NULL;
return copy;
}
static __be32
@ -1948,22 +1943,24 @@ nfsd4_fallocate(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
nfsd_file_put(nf);
return status;
}
static __be32
nfsd4_offload_status(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate,
union nfsd4_op_u *u)
{
struct nfsd4_offload_status *os = &u->offload_status;
__be32 status = 0;
__be32 status = nfs_ok;
struct nfsd4_copy *copy;
struct nfs4_client *clp = cstate->clp;
copy = find_async_copy(clp, &os->stateid);
if (copy) {
spin_lock(&clp->async_lock);
copy = find_async_copy_locked(clp, &os->stateid);
if (copy)
os->count = copy->cp_res.wr_bytes_written;
nfs4_put_copy(copy);
} else
else
status = nfserr_bad_stateid;
spin_unlock(&clp->async_lock);
return status;
}
@ -3619,12 +3616,13 @@ static const struct svc_procedure nfsd_procedures4[2] = {
},
};
static unsigned int nfsd_count3[ARRAY_SIZE(nfsd_procedures4)];
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nfsd_count4[ARRAY_SIZE(nfsd_procedures4)]);
const struct svc_version nfsd_version4 = {
.vs_vers = 4,
.vs_nproc = 2,
.vs_nproc = ARRAY_SIZE(nfsd_procedures4),
.vs_proc = nfsd_procedures4,
.vs_count = nfsd_count3,
.vs_count = nfsd_count4,
.vs_dispatch = nfsd_dispatch,
.vs_xdrsize = NFS4_SVC_XDRSIZE,
.vs_rpcb_optnl = true,
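
Several of the nfsd4_copy() hunks above are ordering fixes for the error paths:
the copies list head and the refcount are initialized immediately after the
kzalloc, so cleanup_async_copy() can safely run on a partially constructed copy
(the guarded list_del_init() and the final put both behave correctly), and all
nfsd_file references are dropped in one place, release_copy_files(). The
crucial ordering, condensed:

    async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
    if (!async_copy)
            goto out_err;
    INIT_LIST_HEAD(&async_copy->copies);    /* makes list_del_init() safe */
    refcount_set(&async_copy->refcount, 1); /* makes nfs4_put_copy() safe */
    /* later failures may now call cleanup_async_copy(async_copy) */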

View File

@ -599,14 +599,6 @@ put_nfs4_file(struct nfs4_file *fi)
}
}
static struct nfsd_file *
__nfs4_get_fd(struct nfs4_file *f, int oflag)
{
if (f->fi_fds[oflag])
return nfsd_file_get(f->fi_fds[oflag]);
return NULL;
}
static struct nfsd_file *
find_writeable_file_locked(struct nfs4_file *f)
{
@ -614,9 +606,9 @@ find_writeable_file_locked(struct nfs4_file *f)
lockdep_assert_held(&f->fi_lock);
ret = __nfs4_get_fd(f, O_WRONLY);
ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
if (!ret)
ret = __nfs4_get_fd(f, O_RDWR);
ret = nfsd_file_get(f->fi_fds[O_RDWR]);
return ret;
}
@ -639,9 +631,9 @@ find_readable_file_locked(struct nfs4_file *f)
lockdep_assert_held(&f->fi_lock);
ret = __nfs4_get_fd(f, O_RDONLY);
ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
if (!ret)
ret = __nfs4_get_fd(f, O_RDWR);
ret = nfsd_file_get(f->fi_fds[O_RDWR]);
return ret;
}
@ -665,11 +657,11 @@ find_any_file(struct nfs4_file *f)
if (!f)
return NULL;
spin_lock(&f->fi_lock);
ret = __nfs4_get_fd(f, O_RDWR);
ret = nfsd_file_get(f->fi_fds[O_RDWR]);
if (!ret) {
ret = __nfs4_get_fd(f, O_WRONLY);
ret = nfsd_file_get(f->fi_fds[O_WRONLY]);
if (!ret)
ret = __nfs4_get_fd(f, O_RDONLY);
ret = nfsd_file_get(f->fi_fds[O_RDONLY]);
}
spin_unlock(&f->fi_lock);
return ret;
@ -688,15 +680,6 @@ static struct nfsd_file *find_any_file_locked(struct nfs4_file *f)
return NULL;
}
static struct nfsd_file *find_deleg_file_locked(struct nfs4_file *f)
{
lockdep_assert_held(&f->fi_lock);
if (f->fi_deleg_file)
return f->fi_deleg_file;
return NULL;
}
static atomic_long_t num_delegations;
unsigned long max_delegations;
@ -992,7 +975,6 @@ static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
stid->cs_stid.si_opaque.so_clid.cl_boot = (u32)nn->boot_time;
stid->cs_stid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
stid->cs_type = cs_type;
idr_preload(GFP_KERNEL);
spin_lock(&nn->s2s_cp_lock);
@ -1003,6 +985,7 @@ static int nfs4_init_cp_state(struct nfsd_net *nn, copy_stateid_t *stid,
idr_preload_end();
if (new_id < 0)
return 0;
stid->cs_type = cs_type;
return 1;
}
@ -1036,7 +1019,8 @@ void nfs4_free_copy_state(struct nfsd4_copy *copy)
{
struct nfsd_net *nn;
WARN_ON_ONCE(copy->cp_stateid.cs_type != NFS4_COPY_STID);
if (copy->cp_stateid.cs_type != NFS4_COPY_STID)
return;
nn = net_generic(copy->cp_clp->net, nfsd_net_id);
spin_lock(&nn->s2s_cp_lock);
idr_remove(&nn->s2s_cp_stateids,
@ -2705,7 +2689,7 @@ static int nfs4_show_deleg(struct seq_file *s, struct nfs4_stid *st)
ds = delegstateid(st);
nf = st->sc_file;
spin_lock(&nf->fi_lock);
file = find_deleg_file_locked(nf);
file = nf->fi_deleg_file;
if (!file)
goto out;
@ -5298,16 +5282,17 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp,
/* test and set deny mode */
spin_lock(&fp->fi_lock);
status = nfs4_file_check_deny(fp, open->op_share_deny);
if (status == nfs_ok) {
if (status != nfserr_share_denied) {
set_deny(open->op_share_deny, stp);
fp->fi_share_deny |=
(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
} else {
if (nfs4_resolve_deny_conflicts_locked(fp, false,
stp, open->op_share_deny, false))
status = nfserr_jukebox;
}
switch (status) {
case nfs_ok:
set_deny(open->op_share_deny, stp);
fp->fi_share_deny |=
(open->op_share_deny & NFS4_SHARE_DENY_BOTH);
break;
case nfserr_share_denied:
if (nfs4_resolve_deny_conflicts_locked(fp, false,
stp, open->op_share_deny, false))
status = nfserr_jukebox;
break;
}
spin_unlock(&fp->fi_lock);
@ -5438,6 +5423,23 @@ nfsd4_verify_deleg_dentry(struct nfsd4_open *open, struct nfs4_file *fp,
return 0;
}
/*
* We avoid breaking delegations held by a client due to its own activity, but
* clearing setuid/setgid bits on a write is an implicit activity and the client
* may not notice and continue using the old mode. Avoid giving out a delegation
* on setuid/setgid files when the client is requesting an open for write.
*/
static int
nfsd4_verify_setuid_write(struct nfsd4_open *open, struct nfsd_file *nf)
{
struct inode *inode = file_inode(nf->nf_file);
if ((open->op_share_access & NFS4_SHARE_ACCESS_WRITE) &&
(inode->i_mode & (S_ISUID|S_ISGID)))
return -EAGAIN;
return 0;
}
static struct nfs4_delegation *
nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
struct svc_fh *parent)
@ -5471,6 +5473,8 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
spin_lock(&fp->fi_lock);
if (nfs4_delegation_exists(clp, fp))
status = -EAGAIN;
else if (nfsd4_verify_setuid_write(open, nf))
status = -EAGAIN;
else if (!fp->fi_deleg_file) {
fp->fi_deleg_file = nf;
/* increment early to prevent fi_deleg_file from being
@ -5511,6 +5515,14 @@ nfs4_set_delegation(struct nfsd4_open *open, struct nfs4_ol_stateid *stp,
if (status)
goto out_unlock;
/*
* Now that the deleg is set, check again to ensure that nothing
* raced in and changed the mode while we weren't looking.
*/
status = nfsd4_verify_setuid_write(open, fp->fi_deleg_file);
if (status)
goto out_unlock;
spin_lock(&state_lock);
spin_lock(&fp->fi_lock);
if (fp->fi_had_conflict)
@ -6406,23 +6418,26 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
static struct nfsd_file *
nfs4_find_file(struct nfs4_stid *s, int flags)
{
struct nfsd_file *ret = NULL;
if (!s)
return NULL;
switch (s->sc_type) {
case NFS4_DELEG_STID:
if (WARN_ON_ONCE(!s->sc_file->fi_deleg_file))
return NULL;
return nfsd_file_get(s->sc_file->fi_deleg_file);
spin_lock(&s->sc_file->fi_lock);
ret = nfsd_file_get(s->sc_file->fi_deleg_file);
spin_unlock(&s->sc_file->fi_lock);
break;
case NFS4_OPEN_STID:
case NFS4_LOCK_STID:
if (flags & RD_STATE)
return find_readable_file(s->sc_file);
ret = find_readable_file(s->sc_file);
else
return find_writeable_file(s->sc_file);
ret = find_writeable_file(s->sc_file);
}
return NULL;
return ret;
}
static __be32
@ -6547,8 +6562,19 @@ void nfs4_put_cpntf_state(struct nfsd_net *nn, struct nfs4_cpntf_state *cps)
spin_unlock(&nn->s2s_cp_lock);
}
/*
* Checks for stateid operations
/**
* nfs4_preprocess_stateid_op - find and prep stateid for an operation
* @rqstp: incoming request from client
* @cstate: current compound state
* @fhp: filehandle associated with requested stateid
* @stateid: stateid (provided by client)
* @flags: flags describing type of operation to be done
* @nfp: optional nfsd_file return pointer (may be NULL)
* @cstid: optional returned nfs4_stid pointer (may be NULL)
*
* Given info from the client, look up an nfs4_stid for the operation. On
* success, it returns a reference to the nfs4_stid and/or the nfsd_file
* associated with it.
*/
__be32
nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
@ -6737,8 +6763,18 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
return status;
}
/*
* Checks for sequence id mutating operations.
/**
* nfs4_preprocess_seqid_op - find and prep an ol_stateid for a seqid-morphing op
* @cstate: compound state
* @seqid: seqid (provided by client)
* @stateid: stateid (provided by client)
* @typemask: mask of allowable types for this operation
* @stpp: return pointer for the stateid found
* @nn: net namespace for request
*
* Given a stateid+seqid from a client, look up an nfs4_ol_stateid and
* return it in @stpp. On an nfs_ok return, the returned stateid will
* have its st_mutex locked.
*/
static __be32
nfs4_preprocess_seqid_op(struct nfsd4_compound_state *cstate, u32 seqid,

View File

@ -488,7 +488,7 @@ found_entry:
case RC_NOCACHE:
break;
case RC_REPLSTAT:
svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);
xdr_stream_encode_be32(&rqstp->rq_res_stream, rp->c_replstat);
rtn = RC_REPLY;
break;
case RC_REPLBUFF:
@ -509,7 +509,7 @@ out_trace:
* nfsd_cache_update - Update an entry in the duplicate reply cache.
* @rqstp: svc_rqst with a finished Reply
* @cachetype: which cache to update
* @statp: Reply's status code
* @statp: pointer to Reply's NFS status code, or NULL
*
* This is called from nfsd_dispatch when the procedure has been
* executed and the complete reply is in rqstp->rq_res.
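
The RC_REPLSTAT change above swaps a raw kvec store for the bounds-checked
xdr_stream interface, which is the memory-safety theme of this series:

    /* before: writes through a bare pointer at the end of the head kvec */
    svc_putu32(&rqstp->rq_res.head[0], rp->c_replstat);

    /* after: xdr_stream_encode_be32() reserves space in the reply stream
     * and stores a value that is already in network byte order
     */
    xdr_stream_encode_be32(&rqstp->rq_res_stream, rp->c_replstat);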

View File

@ -14,7 +14,6 @@
#include <linux/lockd/lockd.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/gss_krb5_enctypes.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <linux/module.h>
#include <linux/fsnotify.h>
@ -47,7 +46,6 @@ enum {
NFSD_MaxBlkSize,
NFSD_MaxConnections,
NFSD_Filecache,
NFSD_SupportedEnctypes,
/*
* The below MUST come last. Otherwise we leave a hole in nfsd_files[]
* with !CONFIG_NFSD_V4 and simple_fill_super() goes oops
@ -187,16 +185,6 @@ static int export_features_show(struct seq_file *m, void *v)
DEFINE_SHOW_ATTRIBUTE(export_features);
#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
static int supported_enctypes_show(struct seq_file *m, void *v)
{
seq_printf(m, KRB5_SUPPORTED_ENCTYPES);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(supported_enctypes);
#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
static const struct file_operations pool_stats_operations = {
.open = nfsd_pool_stats_open,
.read = seq_read,
@ -1150,6 +1138,9 @@ static struct inode *nfsd_get_inode(struct super_block *sb, umode_t mode)
inode->i_op = &simple_dir_inode_operations;
inc_nlink(inode);
break;
case S_IFLNK:
inode->i_op = &simple_symlink_inode_operations;
break;
default:
break;
}
@ -1195,6 +1186,54 @@ out_err:
goto out;
}
#if IS_ENABLED(CONFIG_SUNRPC_GSS)
static int __nfsd_symlink(struct inode *dir, struct dentry *dentry,
umode_t mode, const char *content)
{
struct inode *inode;
inode = nfsd_get_inode(dir->i_sb, mode);
if (!inode)
return -ENOMEM;
inode->i_link = (char *)content;
inode->i_size = strlen(content);
d_add(dentry, inode);
inc_nlink(dir);
fsnotify_create(dir, dentry);
return 0;
}
/*
* @content is assumed to be a NUL-terminated string that lives
* longer than the symlink itself.
*/
static void nfsd_symlink(struct dentry *parent, const char *name,
const char *content)
{
struct inode *dir = parent->d_inode;
struct dentry *dentry;
int ret;
inode_lock(dir);
dentry = d_alloc_name(parent, name);
if (!dentry)
goto out;
ret = __nfsd_symlink(d_inode(parent), dentry, S_IFLNK | 0777, content);
if (ret)
dput(dentry);
out:
inode_unlock(dir);
}
#else
static inline void nfsd_symlink(struct dentry *parent, const char *name,
const char *content)
{
}
#endif
static void clear_ncl(struct inode *inode)
{
struct nfsdfs_client *ncl = inode->i_private;
@ -1355,10 +1394,6 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
[NFSD_MaxBlkSize] = {"max_block_size", &transaction_ops, S_IWUSR|S_IRUGO},
[NFSD_MaxConnections] = {"max_connections", &transaction_ops, S_IWUSR|S_IRUGO},
[NFSD_Filecache] = {"filecache", &nfsd_file_cache_stats_fops, S_IRUGO},
#if defined(CONFIG_SUNRPC_GSS) || defined(CONFIG_SUNRPC_GSS_MODULE)
[NFSD_SupportedEnctypes] = {"supported_krb5_enctypes",
&supported_enctypes_fops, S_IRUGO},
#endif /* CONFIG_SUNRPC_GSS or CONFIG_SUNRPC_GSS_MODULE */
#ifdef CONFIG_NFSD_V4
[NFSD_Leasetime] = {"nfsv4leasetime", &transaction_ops, S_IWUSR|S_IRUSR},
[NFSD_Gracetime] = {"nfsv4gracetime", &transaction_ops, S_IWUSR|S_IRUSR},
@ -1371,6 +1406,8 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
ret = simple_fill_super(sb, 0x6e667364, nfsd_files);
if (ret)
return ret;
nfsd_symlink(sb->s_root, "supported_krb5_enctypes",
"/proc/net/rpc/gss_krb5_enctypes");
dentry = nfsd_mkdir(sb->s_root, NULL, "clients");
if (IS_ERR(dentry))
return PTR_ERR(dentry);
@ -1458,16 +1495,11 @@ static __net_init int nfsd_init_net(struct net *net)
nn->nfsd_versions = NULL;
nn->nfsd4_minorversions = NULL;
nfsd4_init_leases_net(nn);
retval = nfsd_reply_cache_init(nn);
if (retval)
goto out_cache_error;
get_random_bytes(&nn->siphash_key, sizeof(nn->siphash_key));
seqlock_init(&nn->writeverf_lock);
return 0;
out_cache_error:
nfsd_idmap_shutdown(net);
out_idmap_error:
nfsd_export_shutdown(net);
out_export_error:
@ -1476,9 +1508,6 @@ out_export_error:
static __net_exit void nfsd_exit_net(struct net *net)
{
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
nfsd_reply_cache_shutdown(nn);
nfsd_idmap_shutdown(net);
nfsd_export_shutdown(net);
nfsd_netns_free_versions(net_generic(net, nfsd_net_id));

View File

@ -86,7 +86,7 @@ bool nfssvc_encode_voidres(struct svc_rqst *rqstp,
* Function prototypes.
*/
int nfsd_svc(int nrservs, struct net *net, const struct cred *cred);
int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp);
int nfsd_dispatch(struct svc_rqst *rqstp);
int nfsd_nrthreads(struct net *);
int nfsd_nrpools(struct net *);

View File

@ -838,11 +838,11 @@ static const struct svc_procedure nfsd_procedures2[18] = {
},
};
static unsigned int nfsd_count2[ARRAY_SIZE(nfsd_procedures2)];
static DEFINE_PER_CPU_ALIGNED(unsigned long,
nfsd_count2[ARRAY_SIZE(nfsd_procedures2)]);
const struct svc_version nfsd_version2 = {
.vs_vers = 2,
.vs_nproc = 18,
.vs_nproc = ARRAY_SIZE(nfsd_procedures2),
.vs_proc = nfsd_procedures2,
.vs_count = nfsd_count2,
.vs_dispatch = nfsd_dispatch,

View File

@ -363,7 +363,7 @@ void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn)
do {
read_seqbegin_or_lock(&nn->writeverf_lock, &seq);
memcpy(verf, nn->writeverf, sizeof(*verf));
memcpy(verf, nn->writeverf, sizeof(nn->writeverf));
} while (need_seqretry(&nn->writeverf_lock, seq));
done_seqretry(&nn->writeverf_lock, seq);
}
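
The verifier fix just above is a classic C array-parameter pitfall: in the
prototype nfsd_copy_write_verifier(__be32 verf[2], ...), the parameter decays
to __be32 *, so sizeof(*verf) is 4 and the memcpy silently copied only half of
the 8-byte write verifier. Referencing the real array, nn->writeverf, restores
the full copy. Condensed:

    void nfsd_copy_write_verifier(__be32 verf[2], struct nfsd_net *nn)
    {
            memcpy(verf, nn->writeverf, sizeof(*verf));         /* bug: 4 bytes */
            memcpy(verf, nn->writeverf, sizeof(nn->writeverf)); /* fix: 8 bytes */
    }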
@ -427,16 +427,23 @@ static int nfsd_startup_net(struct net *net, const struct cred *cred)
ret = nfsd_file_cache_start_net(net);
if (ret)
goto out_lockd;
ret = nfs4_state_start_net(net);
ret = nfsd_reply_cache_init(nn);
if (ret)
goto out_filecache;
ret = nfs4_state_start_net(net);
if (ret)
goto out_reply_cache;
#ifdef CONFIG_NFSD_V4_2_INTER_SSC
nfsd4_ssc_init_umount_work(nn);
#endif
nn->nfsd_net_up = true;
return 0;
out_reply_cache:
nfsd_reply_cache_shutdown(nn);
out_filecache:
nfsd_file_cache_shutdown_net(net);
out_lockd:
@ -454,6 +461,7 @@ static void nfsd_shutdown_net(struct net *net)
struct nfsd_net *nn = net_generic(net, nfsd_net_id);
nfs4_state_shutdown_net(net);
nfsd_reply_cache_shutdown(nn);
nfsd_file_cache_shutdown_net(net);
if (nn->lockd_up) {
lockd_down(net);
@ -1022,7 +1030,6 @@ out:
/**
* nfsd_dispatch - Process an NFS or NFSACL Request
* @rqstp: incoming request
* @statp: pointer to location of accept_stat field in RPC Reply buffer
*
* This RPC dispatcher integrates the NFS server's duplicate reply cache.
*
@ -1030,9 +1037,10 @@ out:
* %0: Processing complete; do not send a Reply
* %1: Processing complete; send Reply in rqstp->rq_res
*/
int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
int nfsd_dispatch(struct svc_rqst *rqstp)
{
const struct svc_procedure *proc = rqstp->rq_procinfo;
__be32 *statp = rqstp->rq_accept_statp;
/*
* Give the xdr decoder a chance to change this if it wants
@ -1040,7 +1048,6 @@ int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
*/
rqstp->rq_cachetype = proc->pc_cachetype;
svcxdr_init_decode(rqstp);
if (!proc->pc_decode(rqstp, &rqstp->rq_arg_stream))
goto out_decode_err;
@ -1053,12 +1060,6 @@ int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
goto out_dropit;
}
/*
* Need to grab the location to store the status, as
* NFSv4 does some encoding while processing
*/
svcxdr_init_encode(rqstp);
*statp = proc->pc_func(rqstp);
if (test_bit(RQ_DROPME, &rqstp->rq_flags))
goto out_update_drop;

View File

@ -705,8 +705,6 @@ extern struct nfs4_client_reclaim *nfs4_client_to_reclaim(struct xdr_netobj name
extern bool nfs4_has_reclaimed_state(struct xdr_netobj name, struct nfsd_net *nn);
void put_nfs4_file(struct nfs4_file *fi);
extern struct nfsd4_copy *
find_async_copy(struct nfs4_client *clp, stateid_t *staetid);
extern void nfs4_put_cpntf_state(struct nfsd_net *nn,
struct nfs4_cpntf_state *cps);
extern __be32 manage_cpntf_state(struct nfsd_net *nn, stateid_t *st,

View File

@ -1202,37 +1202,6 @@ TRACE_EVENT(nfsd_file_close,
)
);
TRACE_EVENT(nfsd_file_fsync,
TP_PROTO(
const struct nfsd_file *nf,
int ret
),
TP_ARGS(nf, ret),
TP_STRUCT__entry(
__field(void *, nf_inode)
__field(int, nf_ref)
__field(int, ret)
__field(unsigned long, nf_flags)
__field(unsigned char, nf_may)
__field(struct file *, nf_file)
),
TP_fast_assign(
__entry->nf_inode = nf->nf_inode;
__entry->nf_ref = refcount_read(&nf->nf_ref);
__entry->ret = ret;
__entry->nf_flags = nf->nf_flags;
__entry->nf_may = nf->nf_may;
__entry->nf_file = nf->nf_file;
),
TP_printk("inode=%p ref=%d flags=%s may=%s nf_file=%p ret=%d",
__entry->nf_inode,
__entry->nf_ref,
show_nf_flags(__entry->nf_flags),
show_nfsd_may_flags(__entry->nf_may),
__entry->nf_file, __entry->ret
)
);
#include "cache.h"
TRACE_DEFINE_ENUM(RC_DROPIT);

View File

@ -126,9 +126,13 @@ nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
struct dentry *dentry = *dpp;
struct path path = {.mnt = mntget(exp->ex_path.mnt),
.dentry = dget(dentry)};
unsigned int follow_flags = 0;
int err = 0;
err = follow_down(&path);
if (exp->ex_flags & NFSEXP_CROSSMOUNT)
follow_flags = LOOKUP_AUTOMOUNT;
err = follow_down(&path, follow_flags);
if (err < 0)
goto out;
if (path.mnt == exp->ex_path.mnt && path.dentry == dentry &&
@ -223,7 +227,7 @@ int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
return 1;
if (nfsd4_is_junction(dentry))
return 1;
if (d_mountpoint(dentry))
if (d_managed(dentry))
/*
* Might only be a mountpoint in a different namespace,
* but we need to check.

View File

@ -571,7 +571,7 @@ struct nfsd4_copy {
struct task_struct *copy_task;
refcount_t refcount;
struct vfsmount *ss_mnt;
struct nfsd4_ssc_umount_item *ss_nsui;
struct nfs_fh c_fh;
nfs4_stateid stateid;
};

View File

@ -196,9 +196,9 @@ struct nlm_block {
* Global variables
*/
extern const struct rpc_program nlm_program;
extern const struct svc_procedure nlmsvc_procedures[];
extern const struct svc_procedure nlmsvc_procedures[24];
#ifdef CONFIG_LOCKD_V4
extern const struct svc_procedure nlmsvc_procedures4[];
extern const struct svc_procedure nlmsvc_procedures4[24];
#endif
extern int nlmsvc_grace_period;
extern unsigned long nlmsvc_timeout;
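
The change from an unsized extern to an explicit bound is load-bearing:
ARRAY_SIZE() requires a complete array type, so sizing the declaration is what
lets lockd/svc.c define its per-CPU call counters as shown earlier:

    extern const struct svc_procedure nlmsvc_procedures[24];    /* complete type */

    static DEFINE_PER_CPU_ALIGNED(unsigned long,
                    nlmsvc_version3_count[ARRAY_SIZE(nlmsvc_procedures)]);

With the old [] declaration, ARRAY_SIZE(nlmsvc_procedures) would not compile
outside the file that defines the array.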

View File

@ -77,7 +77,7 @@ struct dentry *lookup_one_positive_unlocked(struct mnt_idmap *idmap,
struct dentry *base, int len);
extern int follow_down_one(struct path *);
extern int follow_down(struct path *);
extern int follow_down(struct path *path, unsigned int flags);
extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);

View File

@ -53,6 +53,7 @@ static inline void nfs42_ssc_close(struct file *filep)
if (nfs_ssc_client_tbl.ssc_nfs4_ops)
(*nfs_ssc_client_tbl.ssc_nfs4_ops->sco_close)(filep);
}
#endif
struct nfsd4_ssc_umount_item {
struct list_head nsui_list;
@ -66,7 +67,6 @@ struct nfsd4_ssc_umount_item {
struct vfsmount *nsui_vfsmount;
char nsui_ipaddr[RPC_MAX_ADDRBUFLEN + 1];
};
#endif
/*
* NFS_FS

View File

@ -1,6 +1,4 @@
/*
* linux/include/linux/sunrpc/gss_krb5_types.h
*
* Adapted from MIT Kerberos 5-1.2.1 lib/include/krb5.h,
* lib/gssapi/krb5/gssapiP_krb5.h, and others
*
@ -36,6 +34,9 @@
*
*/
#ifndef _LINUX_SUNRPC_GSS_KRB5_H
#define _LINUX_SUNRPC_GSS_KRB5_H
#include <crypto/skcipher.h>
#include <linux/sunrpc/auth_gss.h>
#include <linux/sunrpc/gss_err.h>
@ -44,80 +45,15 @@
/* Length of constant used in key derivation */
#define GSS_KRB5_K5CLENGTH (5)
/* Maximum key length (in bytes) for the supported crypto algorithms*/
/* Maximum key length (in bytes) for the supported crypto algorithms */
#define GSS_KRB5_MAX_KEYLEN (32)
/* Maximum checksum function output for the supported crypto algorithms */
#define GSS_KRB5_MAX_CKSUM_LEN (20)
/* Maximum checksum function output for the supported enctypes */
#define GSS_KRB5_MAX_CKSUM_LEN (24)
/* Maximum blocksize for the supported crypto algorithms */
#define GSS_KRB5_MAX_BLOCKSIZE (16)
struct krb5_ctx;
struct gss_krb5_enctype {
const u32 etype; /* encryption (key) type */
const u32 ctype; /* checksum type */
const char *name; /* "friendly" name */
const char *encrypt_name; /* crypto encrypt name */
const char *cksum_name; /* crypto checksum name */
const u16 signalg; /* signing algorithm */
const u16 sealalg; /* sealing algorithm */
const u32 blocksize; /* encryption blocksize */
const u32 conflen; /* confounder length
(normally the same as
the blocksize) */
const u32 cksumlength; /* checksum length */
const u32 keyed_cksum; /* is it a keyed cksum? */
const u32 keybytes; /* raw key len, in bytes */
const u32 keylength; /* final key len, in bytes */
u32 (*encrypt) (struct crypto_sync_skcipher *tfm,
void *iv, void *in, void *out,
int length); /* encryption function */
u32 (*decrypt) (struct crypto_sync_skcipher *tfm,
void *iv, void *in, void *out,
int length); /* decryption function */
u32 (*mk_key) (const struct gss_krb5_enctype *gk5e,
struct xdr_netobj *in,
struct xdr_netobj *out); /* complete key generation */
u32 (*encrypt_v2) (struct krb5_ctx *kctx, u32 offset,
struct xdr_buf *buf,
struct page **pages); /* v2 encryption function */
u32 (*decrypt_v2) (struct krb5_ctx *kctx, u32 offset, u32 len,
struct xdr_buf *buf, u32 *headskip,
u32 *tailskip); /* v2 decryption function */
};
/* krb5_ctx flags definitions */
#define KRB5_CTX_FLAG_INITIATOR 0x00000001
#define KRB5_CTX_FLAG_CFX 0x00000002
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004
struct krb5_ctx {
int initiate; /* 1 = initiating, 0 = accepting */
u32 enctype;
u32 flags;
const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
struct crypto_sync_skcipher *enc;
struct crypto_sync_skcipher *seq;
struct crypto_sync_skcipher *acceptor_enc;
struct crypto_sync_skcipher *initiator_enc;
struct crypto_sync_skcipher *acceptor_enc_aux;
struct crypto_sync_skcipher *initiator_enc_aux;
u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
u8 cksum[GSS_KRB5_MAX_KEYLEN];
atomic_t seq_send;
atomic64_t seq_send64;
time64_t endtime;
struct xdr_netobj mech_used;
u8 initiator_sign[GSS_KRB5_MAX_KEYLEN];
u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN];
u8 initiator_seal[GSS_KRB5_MAX_KEYLEN];
u8 acceptor_seal[GSS_KRB5_MAX_KEYLEN];
u8 initiator_integ[GSS_KRB5_MAX_KEYLEN];
u8 acceptor_integ[GSS_KRB5_MAX_KEYLEN];
};
/* The length of the Kerberos GSS token header */
#define GSS_KRB5_TOK_HDR_LEN (16)
@ -150,6 +86,12 @@ enum seal_alg {
SEAL_ALG_DES3KD = 0x0002
};
/*
* These values are assigned by IANA and published via the
* subregistry at the link below:
*
* https://www.iana.org/assignments/kerberos-parameters/kerberos-parameters.xhtml#kerberos-parameters-2
*/
#define CKSUMTYPE_CRC32 0x0001
#define CKSUMTYPE_RSA_MD4 0x0002
#define CKSUMTYPE_RSA_MD4_DES 0x0003
@ -160,6 +102,10 @@ enum seal_alg {
#define CKSUMTYPE_HMAC_SHA1_DES3 0x000c
#define CKSUMTYPE_HMAC_SHA1_96_AES128 0x000f
#define CKSUMTYPE_HMAC_SHA1_96_AES256 0x0010
#define CKSUMTYPE_CMAC_CAMELLIA128 0x0011
#define CKSUMTYPE_CMAC_CAMELLIA256 0x0012
#define CKSUMTYPE_HMAC_SHA256_128_AES128 0x0013
#define CKSUMTYPE_HMAC_SHA384_192_AES256 0x0014
#define CKSUMTYPE_HMAC_MD5_ARCFOUR -138 /* Microsoft md5 hmac cksumtype */
/* from gssapi_err_krb5.h */
@ -180,6 +126,11 @@ enum seal_alg {
/* per Kerberos v5 protocol spec crypto types from the wire.
* these get mapped to linux kernel crypto routines.
*
* These values are assigned by IANA and published via the
* subregistry at the link below:
*
* https://www.iana.org/assignments/kerberos-parameters/kerberos-parameters.xhtml#kerberos-parameters-1
*/
#define ENCTYPE_NULL 0x0000
#define ENCTYPE_DES_CBC_CRC 0x0001 /* DES cbc mode with CRC-32 */
@ -193,8 +144,12 @@ enum seal_alg {
#define ENCTYPE_DES3_CBC_SHA1 0x0010
#define ENCTYPE_AES128_CTS_HMAC_SHA1_96 0x0011
#define ENCTYPE_AES256_CTS_HMAC_SHA1_96 0x0012
#define ENCTYPE_AES128_CTS_HMAC_SHA256_128 0x0013
#define ENCTYPE_AES256_CTS_HMAC_SHA384_192 0x0014
#define ENCTYPE_ARCFOUR_HMAC 0x0017
#define ENCTYPE_ARCFOUR_HMAC_EXP 0x0018
#define ENCTYPE_CAMELLIA128_CTS_CMAC 0x0019
#define ENCTYPE_CAMELLIA256_CTS_CMAC 0x001A
#define ENCTYPE_UNKNOWN 0x01ff
/*
@ -216,103 +171,4 @@ enum seal_alg {
#define KG_USAGE_INITIATOR_SEAL (24)
#define KG_USAGE_INITIATOR_SIGN (25)
/*
* This compile-time check verifies that we will not exceed the
* slack space allotted by the client and server auth_gss code
* before they call gss_wrap().
*/
#define GSS_KRB5_MAX_SLACK_NEEDED \
(GSS_KRB5_TOK_HDR_LEN /* gss token header */ \
+ GSS_KRB5_MAX_CKSUM_LEN /* gss token checksum */ \
+ GSS_KRB5_MAX_BLOCKSIZE /* confounder */ \
+ GSS_KRB5_MAX_BLOCKSIZE /* possible padding */ \
+ GSS_KRB5_TOK_HDR_LEN /* encrypted hdr in v2 token */\
+ GSS_KRB5_MAX_CKSUM_LEN /* encryption hmac */ \
+ 4 + 4 /* RPC verifier */ \
+ GSS_KRB5_TOK_HDR_LEN \
+ GSS_KRB5_MAX_CKSUM_LEN)
u32
make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
struct xdr_buf *body, int body_offset, u8 *cksumkey,
unsigned int usage, struct xdr_netobj *cksumout);
u32
make_checksum_v2(struct krb5_ctx *, char *header, int hdrlen,
struct xdr_buf *body, int body_offset, u8 *key,
unsigned int usage, struct xdr_netobj *cksum);
u32 gss_get_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
struct xdr_netobj *);
u32 gss_verify_mic_kerberos(struct gss_ctx *, struct xdr_buf *,
struct xdr_netobj *);
u32
gss_wrap_kerberos(struct gss_ctx *ctx_id, int offset,
struct xdr_buf *outbuf, struct page **pages);
u32
gss_unwrap_kerberos(struct gss_ctx *ctx_id, int offset, int len,
struct xdr_buf *buf);
u32
krb5_encrypt(struct crypto_sync_skcipher *key,
void *iv, void *in, void *out, int length);
u32
krb5_decrypt(struct crypto_sync_skcipher *key,
void *iv, void *in, void *out, int length);
int
gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *outbuf,
int offset, struct page **pages);
int
gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm, struct xdr_buf *inbuf,
int offset);
s32
krb5_make_seq_num(struct krb5_ctx *kctx,
struct crypto_sync_skcipher *key,
int direction,
u32 seqnum, unsigned char *cksum, unsigned char *buf);
s32
krb5_get_seq_num(struct krb5_ctx *kctx,
unsigned char *cksum,
unsigned char *buf, int *direction, u32 *seqnum);
int
xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen);
u32
krb5_derive_key(const struct gss_krb5_enctype *gk5e,
const struct xdr_netobj *inkey,
struct xdr_netobj *outkey,
const struct xdr_netobj *in_constant,
gfp_t gfp_mask);
u32
gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
struct xdr_netobj *randombits,
struct xdr_netobj *key);
u32
gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
struct xdr_netobj *randombits,
struct xdr_netobj *key);
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
struct xdr_buf *buf,
struct page **pages);
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
struct xdr_buf *buf, u32 *plainoffset,
u32 *plainlen);
void
gss_krb5_make_confounder(char *p, u32 conflen);
#endif /* _LINUX_SUNRPC_GSS_KRB5_H */
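
Two constant changes above follow directly from the new SHA-2 enctypes.
GSS_KRB5_MAX_CKSUM_LEN grows from 20 to 24 bytes because the strongest new
checksum, HMAC-SHA-384 truncated to 192 bits for aes256-cts-hmac-sha384-192,
needs 192 / 8 = 24 bytes, where 20 bytes had covered a full SHA-1 output:

    full SHA-1 (hmac-sha1-des3):    160 bits = 20 bytes  (old maximum)
    HMAC-SHA1-96 (aes128/aes256):    96 bits = 12 bytes
    HMAC-SHA384-192 (aes256-sha2):  192 bits = 24 bytes  (new maximum)

The new CKSUMTYPE_* and ENCTYPE_* values (0x0013, 0x0014, and the Camellia
numbers) are the IANA assignments cited in the added comments.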

View File

@ -1,41 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Define the string that exports the set of kernel-supported
* Kerberos enctypes. This list is sent via upcall to gssd, and
* is also exposed via the nfsd /proc API. The consumers generally
* treat this as an ordered list, where the first item in the list
* is the most preferred.
*/
#ifndef _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H
#define _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H
#ifdef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES
/*
* NB: This list includes DES3_CBC_SHA1, which was deprecated by RFC 8429.
*
* ENCTYPE_AES256_CTS_HMAC_SHA1_96
* ENCTYPE_AES128_CTS_HMAC_SHA1_96
* ENCTYPE_DES3_CBC_SHA1
*/
#define KRB5_SUPPORTED_ENCTYPES "18,17,16"
#else /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
/*
* NB: This list includes encryption types that were deprecated
* by RFC 8429 and RFC 6649.
*
* ENCTYPE_AES256_CTS_HMAC_SHA1_96
* ENCTYPE_AES128_CTS_HMAC_SHA1_96
* ENCTYPE_DES3_CBC_SHA1
* ENCTYPE_DES_CBC_MD5
* ENCTYPE_DES_CBC_CRC
* ENCTYPE_DES_CBC_MD4
*/
#define KRB5_SUPPORTED_ENCTYPES "18,17,16,3,1,2"
#endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */
#endif /* _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H */

View File

@ -34,6 +34,11 @@ enum rpc_auth_flavors {
RPC_AUTH_GSS_SPKMP = 390011,
};
/* Maximum size (in octets) of the machinename in an AUTH_UNIX
* credential (per RFC 5531 Appendix A)
*/
#define RPC_MAX_MACHINENAME (255)
/* Maximum size (in bytes) of an rpc credential or verifier */
#define RPC_MAX_AUTH_SIZE (400)

View File

@ -21,14 +21,6 @@
#include <linux/mm.h>
#include <linux/pagevec.h>
/* statistics for svc_pool structures */
struct svc_pool_stats {
atomic_long_t packets;
unsigned long sockets_queued;
atomic_long_t threads_woken;
atomic_long_t threads_timedout;
};
/*
*
* RPC service thread pool.
@ -45,7 +37,12 @@ struct svc_pool {
struct list_head sp_sockets; /* pending sockets */
unsigned int sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
struct svc_pool_stats sp_stats; /* statistics on pool operation */
/* statistics on pool operation */
struct percpu_counter sp_sockets_queued;
struct percpu_counter sp_threads_woken;
struct percpu_counter sp_threads_timedout;
#define SP_TASK_PENDING (0) /* still work to do even if no
* xprt is queued. */
#define SP_CONGESTED (1)
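The conversion from a shared svc_pool_stats structure to percpu_counter fields trades a small read-side summation cost for contention-free updates. A userspace analogue of the idea (pthreads assumed; a per-thread slot stands in for a per-CPU one):

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4

/* One padded slot per thread so updates never share a cache line. */
static long counts[NTHREADS][16];

static void *worker(void *arg)
{
	long id = (long)arg;

	for (int i = 0; i < 1000000; i++)
		counts[id][0]++;	/* lock-free, line-local update */
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];
	long total = 0;

	for (long i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	for (int i = 0; i < NTHREADS; i++)
		total += counts[i][0];	/* sum on read, like percpu_counter_sum() */
	printf("total = %ld\n", total);
	return 0;
}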
@ -193,40 +190,6 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp);
#define RPCSVC_MAXPAGES ((RPCSVC_MAXPAYLOAD+PAGE_SIZE-1)/PAGE_SIZE \
+ 2 + 1)
static inline u32 svc_getnl(struct kvec *iov)
{
__be32 val, *vp;
vp = iov->iov_base;
val = *vp++;
iov->iov_base = (void*)vp;
iov->iov_len -= sizeof(__be32);
return ntohl(val);
}
static inline void svc_putnl(struct kvec *iov, u32 val)
{
__be32 *vp = iov->iov_base + iov->iov_len;
*vp = htonl(val);
iov->iov_len += sizeof(__be32);
}
static inline __be32 svc_getu32(struct kvec *iov)
{
__be32 val, *vp;
vp = iov->iov_base;
val = *vp++;
iov->iov_base = (void*)vp;
iov->iov_len -= sizeof(__be32);
return val;
}
static inline void svc_putu32(struct kvec *iov, __be32 val)
{
__be32 *vp = iov->iov_base + iov->iov_len;
*vp = val;
iov->iov_len += sizeof(__be32);
}
/*
* The context of a single thread, including the request currently being
* processed.
@ -285,6 +248,7 @@ struct svc_rqst {
void * rq_argp; /* decoded arguments */
void * rq_resp; /* xdr'd results */
__be32 *rq_accept_statp;
void * rq_auth_data; /* flavor-specific data */
__be32 rq_auth_stat; /* authentication status */
int rq_auth_slack; /* extra space xdr code
@ -345,29 +309,6 @@ static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
return (struct sockaddr *) &rqst->rq_daddr;
}
/*
* Check buffer bounds after decoding arguments
*/
static inline int
xdr_argsize_check(struct svc_rqst *rqstp, __be32 *p)
{
char *cp = (char *)p;
struct kvec *vec = &rqstp->rq_arg.head[0];
return cp >= (char*)vec->iov_base
&& cp <= (char*)vec->iov_base + vec->iov_len;
}
static inline int
xdr_ressize_check(struct svc_rqst *rqstp, __be32 *p)
{
struct kvec *vec = &rqstp->rq_res.head[0];
char *cp = (char*)p;
vec->iov_len = cp - (char*)vec->iov_base;
return vec->iov_len <= PAGE_SIZE;
}
static inline void svc_free_res_pages(struct svc_rqst *rqstp)
{
while (rqstp->rq_next_page != rqstp->rq_respages) {
@ -394,7 +335,7 @@ struct svc_deferred_req {
struct svc_process_info {
union {
int (*dispatch)(struct svc_rqst *, __be32 *);
int (*dispatch)(struct svc_rqst *rqstp);
struct {
unsigned int lovers;
unsigned int hivers;
@ -433,7 +374,7 @@ struct svc_version {
u32 vs_vers; /* version number */
u32 vs_nproc; /* number of procedures */
const struct svc_procedure *vs_proc; /* per-procedure info */
unsigned int *vs_count; /* call counts */
unsigned long __percpu *vs_count; /* call counts */
u32 vs_xdrsize; /* xdrsize needed for this version */
/* Don't register with rpcbind */
@ -446,7 +387,7 @@ struct svc_version {
bool vs_need_cong_ctrl;
/* Dispatch function */
int (*vs_dispatch)(struct svc_rqst *, __be32 *);
int (*vs_dispatch)(struct svc_rqst *rqstp);
};
/*
@ -540,9 +481,6 @@ static inline void svc_reserve_auth(struct svc_rqst *rqstp, int space)
* svcxdr_init_decode - Prepare an xdr_stream for Call decoding
* @rqstp: controlling server RPC transaction context
*
* This function currently assumes the RPC header in rq_arg has
* already been decoded. Upon return, xdr->p points to the
* location of the upper layer header.
*/
static inline void svcxdr_init_decode(struct svc_rqst *rqstp)
{
@ -550,11 +488,7 @@ static inline void svcxdr_init_decode(struct svc_rqst *rqstp)
struct xdr_buf *buf = &rqstp->rq_arg;
struct kvec *argv = buf->head;
/*
* svc_getnl() and friends do not keep the xdr_buf's ::len
* field up to date. Refresh that field before initializing
* the argument decoding stream.
*/
WARN_ON(buf->len != buf->head->iov_len + buf->page_len + buf->tail->iov_len);
buf->len = buf->head->iov_len + buf->page_len + buf->tail->iov_len;
xdr_init_decode(xdr, buf, argv->iov_base, NULL);
@ -577,12 +511,53 @@ static inline void svcxdr_init_encode(struct svc_rqst *rqstp)
xdr->buf = buf;
xdr->iov = resv;
xdr->p = resv->iov_base + resv->iov_len;
xdr->end = resv->iov_base + PAGE_SIZE - rqstp->rq_auth_slack;
xdr->end = resv->iov_base + PAGE_SIZE;
buf->len = resv->iov_len;
xdr->page_ptr = buf->pages - 1;
buf->buflen = PAGE_SIZE * (rqstp->rq_page_end - buf->pages);
buf->buflen -= rqstp->rq_auth_slack;
xdr->rqst = NULL;
}
/**
* svcxdr_set_auth_slack - Reserve response buffer space for the security flavor
* @rqstp: RPC transaction
* @slack: buffer space to reserve for the transaction's security flavor
*
* Set the request's slack space requirement, and set aside that much
* space in the rqstp's rq_res.head for use when the auth wraps the Reply.
*/
static inline void svcxdr_set_auth_slack(struct svc_rqst *rqstp, int slack)
{
struct xdr_stream *xdr = &rqstp->rq_res_stream;
struct xdr_buf *buf = &rqstp->rq_res;
struct kvec *resv = buf->head;
rqstp->rq_auth_slack = slack;
xdr->end -= XDR_QUADLEN(slack);
buf->buflen -= rqstp->rq_auth_slack;
WARN_ON(xdr->iov != resv);
WARN_ON(xdr->p > xdr->end);
}
/**
* svcxdr_set_accept_stat - Reserve space for the accept_stat field
* @rqstp: RPC transaction context
*
* Return values:
* %true: Success
* %false: No response buffer space was available
*/
static inline bool svcxdr_set_accept_stat(struct svc_rqst *rqstp)
{
struct xdr_stream *xdr = &rqstp->rq_res_stream;
rqstp->rq_accept_statp = xdr_reserve_space(xdr, XDR_UNIT);
if (unlikely(!rqstp->rq_accept_statp))
return false;
*rqstp->rq_accept_statp = rpc_success;
return true;
}
#endif /* SUNRPC_SVC_H */

View File

@ -26,7 +26,6 @@ struct svc_xprt_ops {
void (*xpo_release_rqst)(struct svc_rqst *);
void (*xpo_detach)(struct svc_xprt *);
void (*xpo_free)(struct svc_xprt *);
void (*xpo_secure_port)(struct svc_rqst *rqstp);
void (*xpo_kill_temp_xprt)(struct svc_xprt *);
void (*xpo_start_tls)(struct svc_xprt *);
};

View File

@ -188,7 +188,6 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p)
/*
* XDR buffer helper functions
*/
extern void xdr_shift_buf(struct xdr_buf *, size_t);
extern void xdr_buf_from_iov(const struct kvec *, struct xdr_buf *);
extern int xdr_buf_subsegment(const struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int);
extern void xdr_buf_trim(struct xdr_buf *, unsigned int);
@ -247,6 +246,7 @@ extern int xdr_reserve_space_vec(struct xdr_stream *xdr, struct kvec *vec,
size_t nbytes);
extern void __xdr_commit_encode(struct xdr_stream *xdr);
extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len);
extern void xdr_truncate_decode(struct xdr_stream *xdr, size_t len);
extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen);
extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages,
unsigned int base, unsigned int len);
@ -346,6 +346,11 @@ ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str,
size_t size);
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
size_t maxlen, gfp_t gfp_flags);
ssize_t xdr_stream_decode_opaque_auth(struct xdr_stream *xdr, u32 *flavor,
void **body, unsigned int *body_len);
ssize_t xdr_stream_encode_opaque_auth(struct xdr_stream *xdr, u32 flavor,
void *body, unsigned int body_len);
/**
* xdr_align_size - Calculate padded size of an object
* @n: Size of an object being XDR encoded (in bytes)
@ -469,6 +474,27 @@ xdr_stream_encode_u32(struct xdr_stream *xdr, __u32 n)
return len;
}
/**
* xdr_stream_encode_be32 - Encode a big-endian 32-bit integer
* @xdr: pointer to xdr_stream
* @n: integer to encode
*
* Return values:
* On success, returns length in bytes of XDR buffer consumed
* %-EMSGSIZE on XDR buffer overflow
*/
static inline ssize_t
xdr_stream_encode_be32(struct xdr_stream *xdr, __be32 n)
{
const size_t len = sizeof(n);
__be32 *p = xdr_reserve_space(xdr, len);
if (unlikely(!p))
return -EMSGSIZE;
*p = n;
return len;
}
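One point of the new helper: a caller that already holds a value in network byte order can store it directly, instead of converting to host order just so the u32 helper can convert it back. A mock-buffer sketch of that equivalence (plain userspace C, not the kernel's xdr_stream API):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t wire = htonl(0x12345678);	/* already big-endian, e.g. an XID */
	uint32_t buf[2];

	buf[0] = htonl(ntohl(wire));	/* u32-style encode: swap to host, swap back */
	buf[1] = wire;			/* be32-style encode: store as-is */
	printf("%s\n", buf[0] == buf[1] ? "identical on the wire" : "bug");
	return 0;
}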
/**
* xdr_stream_encode_u64 - Encode a 64-bit integer
* @xdr: pointer to xdr_stream

View File

@ -206,8 +206,30 @@ DECLARE_EVENT_CLASS(rpcgss_svc_gssapi_class,
), \
TP_ARGS(rqstp, maj_stat))
DEFINE_SVC_GSSAPI_EVENT(wrap);
DEFINE_SVC_GSSAPI_EVENT(unwrap);
DEFINE_SVC_GSSAPI_EVENT(mic);
DEFINE_SVC_GSSAPI_EVENT(get_mic);
TRACE_EVENT(rpcgss_svc_wrap_failed,
TP_PROTO(
const struct svc_rqst *rqstp
),
TP_ARGS(rqstp),
TP_STRUCT__entry(
__field(u32, xid)
__string(addr, rqstp->rq_xprt->xpt_remotebuf)
),
TP_fast_assign(
__entry->xid = be32_to_cpu(rqstp->rq_xid);
__assign_str(addr, rqstp->rq_xprt->xpt_remotebuf);
),
TP_printk("addr=%s xid=0x%08x", __get_str(addr), __entry->xid)
);
TRACE_EVENT(rpcgss_svc_unwrap_failed,
TP_PROTO(

View File

@ -1819,20 +1819,20 @@ TRACE_EVENT(svc_stats_latency,
#define show_svc_xprt_flags(flags) \
__print_flags(flags, "|", \
{ (1UL << XPT_BUSY), "XPT_BUSY"}, \
{ (1UL << XPT_CONN), "XPT_CONN"}, \
{ (1UL << XPT_CLOSE), "XPT_CLOSE"}, \
{ (1UL << XPT_DATA), "XPT_DATA"}, \
{ (1UL << XPT_TEMP), "XPT_TEMP"}, \
{ (1UL << XPT_DEAD), "XPT_DEAD"}, \
{ (1UL << XPT_CHNGBUF), "XPT_CHNGBUF"}, \
{ (1UL << XPT_DEFERRED), "XPT_DEFERRED"}, \
{ (1UL << XPT_OLD), "XPT_OLD"}, \
{ (1UL << XPT_LISTENER), "XPT_LISTENER"}, \
{ (1UL << XPT_CACHE_AUTH), "XPT_CACHE_AUTH"}, \
{ (1UL << XPT_LOCAL), "XPT_LOCAL"}, \
{ (1UL << XPT_KILL_TEMP), "XPT_KILL_TEMP"}, \
{ (1UL << XPT_CONG_CTRL), "XPT_CONG_CTRL"})
{ BIT(XPT_BUSY), "BUSY" }, \
{ BIT(XPT_CONN), "CONN" }, \
{ BIT(XPT_CLOSE), "CLOSE" }, \
{ BIT(XPT_DATA), "DATA" }, \
{ BIT(XPT_TEMP), "TEMP" }, \
{ BIT(XPT_DEAD), "DEAD" }, \
{ BIT(XPT_CHNGBUF), "CHNGBUF" }, \
{ BIT(XPT_DEFERRED), "DEFERRED" }, \
{ BIT(XPT_OLD), "OLD" }, \
{ BIT(XPT_LISTENER), "LISTENER" }, \
{ BIT(XPT_CACHE_AUTH), "CACHE_AUTH" }, \
{ BIT(XPT_LOCAL), "LOCAL" }, \
{ BIT(XPT_KILL_TEMP), "KILL_TEMP" }, \
{ BIT(XPT_CONG_CTRL), "CONG_CTRL" })
TRACE_EVENT(svc_xprt_create_err,
TP_PROTO(

net/sunrpc/.kunitconfig Normal file
View File

@ -0,0 +1,30 @@
CONFIG_KUNIT=y
CONFIG_UBSAN=y
CONFIG_STACKTRACE=y
CONFIG_NET=y
CONFIG_NETWORK_FILESYSTEMS=y
CONFIG_INET=y
CONFIG_FILE_LOCKING=y
CONFIG_MULTIUSER=y
CONFIG_CRYPTO=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_CTS=y
CONFIG_CRYPTO_ECB=y
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_CMAC=y
CONFIG_CRYPTO_MD5=y
CONFIG_CRYPTO_SHA1=y
CONFIG_CRYPTO_SHA256=y
CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_DES=y
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_CAMELLIA=y
CONFIG_NFS_FS=y
CONFIG_SUNRPC=y
CONFIG_SUNRPC_GSS=y
CONFIG_RPCSEC_GSS_KRB5=y
CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_DES=y
CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1=y
CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA=y
CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2=y
CONFIG_RPCSEC_GSS_KRB5_KUNIT_TEST=y

View File

@ -19,10 +19,10 @@ config SUNRPC_SWAP
config RPCSEC_GSS_KRB5
tristate "Secure RPC: Kerberos V mechanism"
depends on SUNRPC && CRYPTO
depends on CRYPTO_MD5 && CRYPTO_DES && CRYPTO_CBC && CRYPTO_CTS
depends on CRYPTO_ECB && CRYPTO_HMAC && CRYPTO_SHA1 && CRYPTO_AES
default y
select SUNRPC_GSS
select CRYPTO_SKCIPHER
select CRYPTO_HASH
help
Choose Y here to enable Secure RPC using the Kerberos version 5
GSS-API mechanism (RFC 1964).
@ -34,21 +34,93 @@ config RPCSEC_GSS_KRB5
If unsure, say Y.
config SUNRPC_DISABLE_INSECURE_ENCTYPES
bool "Secure RPC: Disable insecure Kerberos encryption types"
config RPCSEC_GSS_KRB5_SIMPLIFIED
bool
depends on RPCSEC_GSS_KRB5
default n
help
Choose Y here to disable the use of deprecated encryption types
with the Kerberos version 5 GSS-API mechanism (RFC 1964). The
deprecated encryption types include DES-CBC-MD5, DES-CBC-CRC,
and DES-CBC-MD4. These types were deprecated by RFC 6649 because
they were found to be insecure.
N is the default because many sites have deployed KDCs and
keytabs that contain only these deprecated encryption types.
Choosing Y prevents the use of known-insecure encryption types
but might result in compatibility problems.
config RPCSEC_GSS_KRB5_CRYPTOSYSTEM
bool
depends on RPCSEC_GSS_KRB5
config RPCSEC_GSS_KRB5_ENCTYPES_DES
bool "Enable Kerberos enctypes based on DES (deprecated)"
depends on RPCSEC_GSS_KRB5
depends on CRYPTO_CBC && CRYPTO_CTS && CRYPTO_ECB
depends on CRYPTO_HMAC && CRYPTO_MD5 && CRYPTO_SHA1
depends on CRYPTO_DES
default n
select RPCSEC_GSS_KRB5_SIMPLIFIED
help
Choose Y to enable the use of deprecated Kerberos 5
encryption types that utilize Data Encryption Standard
(DES) based ciphers. These include des-cbc-md5,
des-cbc-crc, and des-cbc-md4, which were deprecated by
RFC 6649, and des3-cbc-sha1, which was deprecated by RFC
8429.
These encryption types are known to be insecure, therefore
the default setting of this option is N. Support for these
encryption types is available only for compatibility with
legacy NFS client and server implementations.
Removal of support is planned for a subsequent kernel
release.
config RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1
bool "Enable Kerberos enctypes based on AES and SHA-1"
depends on RPCSEC_GSS_KRB5
depends on CRYPTO_CBC && CRYPTO_CTS
depends on CRYPTO_HMAC && CRYPTO_SHA1
depends on CRYPTO_AES
default y
select RPCSEC_GSS_KRB5_CRYPTOSYSTEM
help
Choose Y to enable the use of Kerberos 5 encryption types
that utilize Advanced Encryption Standard (AES) ciphers and
SHA-1 digests. These include aes128-cts-hmac-sha1-96 and
aes256-cts-hmac-sha1-96.
config RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA
bool "Enable Kerberos encryption types based on Camellia and CMAC"
depends on RPCSEC_GSS_KRB5
depends on CRYPTO_CBC && CRYPTO_CTS && CRYPTO_CAMELLIA
depends on CRYPTO_CMAC
default n
select RPCSEC_GSS_KRB5_CRYPTOSYSTEM
help
Choose Y to enable the use of Kerberos 5 encryption types
that utilize Camellia ciphers (RFC 3713) and CMAC digests
(NIST Special Publication 800-38B). These include
camellia128-cts-cmac and camellia256-cts-cmac.
config RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2
bool "Enable Kerberos enctypes based on AES and SHA-2"
depends on RPCSEC_GSS_KRB5
depends on CRYPTO_CBC && CRYPTO_CTS
depends on CRYPTO_HMAC && CRYPTO_SHA256 && CRYPTO_SHA512
depends on CRYPTO_AES
default n
select RPCSEC_GSS_KRB5_CRYPTOSYSTEM
help
Choose Y to enable the use of Kerberos 5 encryption types
that utilize Advanced Encryption Standard (AES) ciphers and
SHA-2 digests. These include aes128-cts-hmac-sha256-128 and
aes256-cts-hmac-sha384-192.
config RPCSEC_GSS_KRB5_KUNIT_TEST
tristate "KUnit tests for RPCSEC GSS Kerberos" if !KUNIT_ALL_TESTS
depends on RPCSEC_GSS_KRB5 && KUNIT
default KUNIT_ALL_TESTS
help
This builds the KUnit tests for RPCSEC GSS Kerberos 5.
KUnit tests run during boot and output the results to the debug
log in TAP format (https://testanything.org/). They are useful only
for kernel developers running the KUnit test harness and are not
intended for inclusion in a production build.
For more information on KUnit and unit tests in general, refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
config SUNRPC_DEBUG
bool "RPC: Enable dprintk debugging"

View File

@ -13,3 +13,5 @@ obj-$(CONFIG_RPCSEC_GSS_KRB5) += rpcsec_gss_krb5.o
rpcsec_gss_krb5-y := gss_krb5_mech.o gss_krb5_seal.o gss_krb5_unseal.o \
gss_krb5_seqnum.o gss_krb5_wrap.o gss_krb5_crypto.o gss_krb5_keys.o
obj-$(CONFIG_RPCSEC_GSS_KRB5_KUNIT_TEST) += gss_krb5_test.o

View File

@ -49,6 +49,22 @@ static unsigned int gss_key_expire_timeo = GSS_KEY_EXPIRE_TIMEO;
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
/*
* This compile-time check verifies that we will not exceed the
* slack space allotted by the client and server auth_gss code
* before they call gss_wrap().
*/
#define GSS_KRB5_MAX_SLACK_NEEDED \
(GSS_KRB5_TOK_HDR_LEN /* gss token header */ \
+ GSS_KRB5_MAX_CKSUM_LEN /* gss token checksum */ \
+ GSS_KRB5_MAX_BLOCKSIZE /* confounder */ \
+ GSS_KRB5_MAX_BLOCKSIZE /* possible padding */ \
+ GSS_KRB5_TOK_HDR_LEN /* encrypted hdr in v2 token */ \
+ GSS_KRB5_MAX_CKSUM_LEN /* encryption hmac */ \
+ XDR_UNIT * 2 /* RPC verifier */ \
+ GSS_KRB5_TOK_HDR_LEN \
+ GSS_KRB5_MAX_CKSUM_LEN)
#define GSS_CRED_SLACK (RPC_MAX_AUTH_SIZE * 2)
/* length of a krb5 verifier (48), plus data added before arguments when
* using integrity (two 4-byte integers): */
@ -1042,6 +1058,7 @@ gss_create_new(const struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
goto err_put_mech;
auth = &gss_auth->rpc_auth;
auth->au_cslack = GSS_CRED_SLACK >> 2;
BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
auth->au_rslack = GSS_KRB5_MAX_SLACK_NEEDED >> 2;
auth->au_verfsize = GSS_VERF_SLACK >> 2;
auth->au_ralign = GSS_VERF_SLACK >> 2;
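The BUILD_BUG_ON above turns a slack-accounting mistake into a compile failure rather than a runtime buffer overrun. The same pattern in userspace C11, with placeholder constants standing in for the kernel's:

#include <assert.h>

#define MAX_SLACK_NEEDED  148	/* placeholder; the kernel sums the token parts */
#define MAX_AUTH_SIZE     400	/* mirrors RPC_MAX_AUTH_SIZE */

static_assert(MAX_SLACK_NEEDED <= MAX_AUTH_SIZE,
	      "GSS slack must fit in the reserved auth space");

int main(void)
{
	return 0;
}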

View File

@ -46,11 +46,59 @@
#include <linux/random.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>
#include <kunit/visibility.h>
#include "gss_krb5_internal.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
/**
* krb5_make_confounder - Generate a confounder string
* @p: memory location into which to write the string
* @conflen: string length to write, in octets
*
* RFCs 1964 and 3961 mention only "a random confounder" without going
* into detail about its function or cryptographic requirements. The
* assumed purpose is to prevent repeated encryption of a plaintext with
* the same key from generating the same ciphertext. It is also used to
* pad minimum plaintext length to at least a single cipher block.
*
* However, in situations like the GSS Kerberos 5 mechanism, where the
* encryption IV is always all zeroes, the confounder also effectively
* functions like an IV. Thus, not only must it be unique from message
* to message, but it must also be difficult to predict. Otherwise an
* attacker can correlate the confounder to previous or future values,
* making the encryption easier to break.
*
* Given that the primary consumer of this encryption mechanism is a
* network storage protocol, a type of traffic that often carries
predictable payloads (e.g., all zeroes when reading unallocated blocks
* from a file), our confounder generation has to be cryptographically
* strong.
*/
void krb5_make_confounder(u8 *p, int conflen)
{
get_random_bytes(p, conflen);
}
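A userspace equivalent of this "cryptographically strong confounder" requirement is to pull bytes straight from the kernel CSPRNG, for instance via getrandom(2) on Linux (sketch only; the 16-octet length assumes an AES block):

#include <stdio.h>
#include <sys/random.h>

int main(void)
{
	unsigned char conf[16];		/* one AES block */

	if (getrandom(conf, sizeof(conf), 0) != sizeof(conf))
		return 1;
	for (unsigned int i = 0; i < sizeof(conf); i++)
		printf("%02x", conf[i]);
	printf("\n");
	return 0;
}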
/**
* krb5_encrypt - simple encryption of an RPCSEC GSS payload
* @tfm: initialized cipher transform
* @iv: pointer to an IV
* @in: plaintext to encrypt
* @out: OUT: ciphertext
* @length: length of input and output buffers, in bytes
*
* @iv may be NULL to force the use of an all-zero IV.
* The buffer containing the IV must be as large as the
* cipher's ivsize.
*
* Return values:
* %0: @in successfully encrypted into @out
* negative errno: @in not encrypted
*/
u32
krb5_encrypt(
struct crypto_sync_skcipher *tfm,
@ -90,6 +138,22 @@ out:
return ret;
}
/**
* krb5_decrypt - simple decryption of an RPCSEC GSS payload
* @tfm: initialized cipher transform
* @iv: pointer to an IV
* @in: ciphertext to decrypt
* @out: OUT: plaintext
* @length: length of input and output buffers, in bytes
*
* @iv may be NULL to force the use of an all-zero IV.
* The buffer containing the IV must be as large as the
* cipher's ivsize.
*
* Return values:
* %0: @in successfully decrypted into @out
* negative errno: @in not decrypted
*/
u32
krb5_decrypt(
struct crypto_sync_skcipher *tfm,
@ -203,8 +267,8 @@ make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
switch (kctx->gk5e->ctype) {
case CKSUMTYPE_RSA_MD5:
err = kctx->gk5e->encrypt(kctx->seq, NULL, checksumdata,
checksumdata, checksumlen);
err = krb5_encrypt(kctx->seq, NULL, checksumdata,
checksumdata, checksumlen);
if (err)
goto out;
memcpy(cksumout->data,
@ -228,92 +292,76 @@ out_free_cksum:
return err ? GSS_S_FAILURE : 0;
}
/*
* checksum the plaintext data and hdrlen bytes of the token header
* Per rfc4121, sec. 4.2.4, the checksum is performed over the data
* body then over the first 16 octets of the MIC token
* Inclusion of the header data in the calculation of the
* checksum is optional.
/**
* gss_krb5_checksum - Compute the MAC for a GSS Wrap or MIC token
* @tfm: an initialized hash transform
* @header: pointer to a buffer containing the token header, or NULL
* @hdrlen: number of octets in @header
* @body: xdr_buf containing an RPC message (body.len is the message length)
* @body_offset: byte offset into @body to start checksumming
* @cksumout: OUT: a buffer to be filled in with the computed HMAC
*
* Usually expressed as H = HMAC(K, message)[1..h].
*
* Caller provides the truncation length of the output token (h) in
* cksumout.len.
*
* Return values:
* %GSS_S_COMPLETE: Digest computed, @cksumout filled in
* %GSS_S_FAILURE: Call failed
*/
u32
make_checksum_v2(struct krb5_ctx *kctx, char *header, int hdrlen,
struct xdr_buf *body, int body_offset, u8 *cksumkey,
unsigned int usage, struct xdr_netobj *cksumout)
gss_krb5_checksum(struct crypto_ahash *tfm, char *header, int hdrlen,
const struct xdr_buf *body, int body_offset,
struct xdr_netobj *cksumout)
{
struct crypto_ahash *tfm;
struct ahash_request *req;
struct scatterlist sg[1];
int err = -1;
int err = -ENOMEM;
u8 *checksumdata;
if (kctx->gk5e->keyed_cksum == 0) {
dprintk("%s: expected keyed hash for %s\n",
__func__, kctx->gk5e->name);
return GSS_S_FAILURE;
}
if (cksumkey == NULL) {
dprintk("%s: no key supplied for %s\n",
__func__, kctx->gk5e->name);
return GSS_S_FAILURE;
}
checksumdata = kmalloc(GSS_KRB5_MAX_CKSUM_LEN, GFP_KERNEL);
checksumdata = kmalloc(crypto_ahash_digestsize(tfm), GFP_KERNEL);
if (!checksumdata)
return GSS_S_FAILURE;
tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
goto out_free_cksum;
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req)
goto out_free_ahash;
goto out_free_cksum;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
err = crypto_ahash_setkey(tfm, cksumkey, kctx->gk5e->keylength);
if (err)
goto out;
err = crypto_ahash_init(req);
if (err)
goto out;
goto out_free_ahash;
/*
* Per RFC 4121 Section 4.2.4, the checksum is performed over the
* data body first, then over the octets in "header".
*/
err = xdr_process_buf(body, body_offset, body->len - body_offset,
checksummer, req);
if (err)
goto out;
if (header != NULL) {
goto out_free_ahash;
if (header) {
struct scatterlist sg[1];
sg_init_one(sg, header, hdrlen);
ahash_request_set_crypt(req, sg, NULL, hdrlen);
err = crypto_ahash_update(req);
if (err)
goto out;
goto out_free_ahash;
}
ahash_request_set_crypt(req, NULL, checksumdata, 0);
err = crypto_ahash_final(req);
if (err)
goto out;
goto out_free_ahash;
memcpy(cksumout->data, checksumdata, cksumout->len);
cksumout->len = kctx->gk5e->cksumlength;
switch (kctx->gk5e->ctype) {
case CKSUMTYPE_HMAC_SHA1_96_AES128:
case CKSUMTYPE_HMAC_SHA1_96_AES256:
/* note that this truncates the hash */
memcpy(cksumout->data, checksumdata, kctx->gk5e->cksumlength);
break;
default:
BUG();
break;
}
out:
ahash_request_free(req);
out_free_ahash:
crypto_free_ahash(tfm);
ahash_request_free(req);
out_free_cksum:
kfree(checksumdata);
return err ? GSS_S_FAILURE : 0;
kfree_sensitive(checksumdata);
return err ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
EXPORT_SYMBOL_IF_KUNIT(gss_krb5_checksum);
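The [1..h] truncation above is whatever the caller supplies in cksumout->len. A standalone sketch of the same shape using OpenSSL (assumed available; the key and message are placeholders), truncating an HMAC-SHA1 to the 96 bits used by the aes-cts-hmac-sha1-96 enctypes:

#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char key[16] = { 0 };		/* placeholder checksum subkey */
	unsigned char mac[EVP_MAX_MD_SIZE];
	unsigned int maclen;
	const char *msg = "message body, then token header";

	HMAC(EVP_sha1(), key, sizeof(key),
	     (const unsigned char *)msg, strlen(msg), mac, &maclen);
	for (unsigned int i = 0; i < 12; i++)	/* h = 96 bits */
		printf("%02x", mac[i]);
	printf("\n");
	return 0;
}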
struct encryptor_desc {
u8 iv[GSS_KRB5_MAX_BLOCKSIZE];
@ -526,7 +574,6 @@ xdr_extend_head(struct xdr_buf *buf, unsigned int base, unsigned int shiftlen)
if (shiftlen == 0)
return 0;
BUILD_BUG_ON(GSS_KRB5_MAX_SLACK_NEEDED > RPC_MAX_AUTH_SIZE);
BUG_ON(shiftlen > RPC_MAX_AUTH_SIZE);
p = buf->head[0].iov_base + base;
@ -595,40 +642,157 @@ out:
return ret;
}
/**
* krb5_cbc_cts_encrypt - encrypt in CBC mode with CTS
* @cts_tfm: CBC cipher with CTS
* @cbc_tfm: base CBC cipher
* @offset: starting byte offset for plaintext
* @buf: OUT: output buffer
* @pages: plaintext
* @iv: output CBC initialization vector, or NULL
* @ivsize: size of @iv, in octets
*
* To provide confidentiality, encrypt using cipher block chaining
* with ciphertext stealing. Message integrity is handled separately.
*
* Return values:
* %0: encryption successful
* negative errno: encryption could not be completed
*/
VISIBLE_IF_KUNIT
int krb5_cbc_cts_encrypt(struct crypto_sync_skcipher *cts_tfm,
struct crypto_sync_skcipher *cbc_tfm,
u32 offset, struct xdr_buf *buf, struct page **pages,
u8 *iv, unsigned int ivsize)
{
u32 blocksize, nbytes, nblocks, cbcbytes;
struct encryptor_desc desc;
int err;
blocksize = crypto_sync_skcipher_blocksize(cts_tfm);
nbytes = buf->len - offset;
nblocks = (nbytes + blocksize - 1) / blocksize;
cbcbytes = 0;
if (nblocks > 2)
cbcbytes = (nblocks - 2) * blocksize;
memset(desc.iv, 0, sizeof(desc.iv));
/* Handle block-sized chunks of plaintext with CBC. */
if (cbcbytes) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, cbc_tfm);
desc.pos = offset;
desc.fragno = 0;
desc.fraglen = 0;
desc.pages = pages;
desc.outbuf = buf;
desc.req = req;
skcipher_request_set_sync_tfm(req, cbc_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
sg_init_table(desc.infrags, 4);
sg_init_table(desc.outfrags, 4);
err = xdr_process_buf(buf, offset, cbcbytes, encryptor, &desc);
skcipher_request_zero(req);
if (err)
return err;
}
/* Remaining plaintext is handled with CBC-CTS. */
err = gss_krb5_cts_crypt(cts_tfm, buf, offset + cbcbytes,
desc.iv, pages, 1);
if (err)
return err;
if (unlikely(iv))
memcpy(iv, desc.iv, ivsize);
return 0;
}
EXPORT_SYMBOL_IF_KUNIT(krb5_cbc_cts_encrypt);
/**
* krb5_cbc_cts_decrypt - decrypt in CBC mode with CTS
* @cts_tfm: CBC cipher with CTS
* @cbc_tfm: base CBC cipher
* @offset: starting byte offset for plaintext
* @buf: OUT: output buffer
*
* Return values:
* %0: decryption successful
* negative errno: decryption could not be completed
*/
VISIBLE_IF_KUNIT
int krb5_cbc_cts_decrypt(struct crypto_sync_skcipher *cts_tfm,
struct crypto_sync_skcipher *cbc_tfm,
u32 offset, struct xdr_buf *buf)
{
u32 blocksize, nblocks, cbcbytes;
struct decryptor_desc desc;
int err;
blocksize = crypto_sync_skcipher_blocksize(cts_tfm);
nblocks = (buf->len + blocksize - 1) / blocksize;
cbcbytes = 0;
if (nblocks > 2)
cbcbytes = (nblocks - 2) * blocksize;
memset(desc.iv, 0, sizeof(desc.iv));
/* Handle block-sized chunks of plaintext with CBC. */
if (cbcbytes) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, cbc_tfm);
desc.fragno = 0;
desc.fraglen = 0;
desc.req = req;
skcipher_request_set_sync_tfm(req, cbc_tfm);
skcipher_request_set_callback(req, 0, NULL, NULL);
sg_init_table(desc.frags, 4);
err = xdr_process_buf(buf, 0, cbcbytes, decryptor, &desc);
skcipher_request_zero(req);
if (err)
return err;
}
/* Remaining plaintext is handled with CBC-CTS. */
return gss_krb5_cts_crypt(cts_tfm, buf, cbcbytes, desc.iv, NULL, 0);
}
EXPORT_SYMBOL_IF_KUNIT(krb5_cbc_cts_decrypt);
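Both helpers split the work the same way: everything except the final two blocks goes through the plain CBC "aux" cipher, and CTS handles the remainder, including any partial last block. A small arithmetic sketch of that split for a few payload sizes:

#include <stdio.h>

int main(void)
{
	const unsigned int blocksize = 16;	/* AES */
	unsigned int lengths[] = { 16, 17, 32, 33, 100 };

	for (unsigned int i = 0; i < sizeof(lengths) / sizeof(lengths[0]); i++) {
		unsigned int nbytes = lengths[i];
		unsigned int nblocks = (nbytes + blocksize - 1) / blocksize;
		unsigned int cbcbytes = nblocks > 2 ? (nblocks - 2) * blocksize : 0;

		printf("len %3u: %3u bytes via CBC, %3u via CTS\n",
		       nbytes, cbcbytes, nbytes - cbcbytes);
	}
	return 0;
}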
u32
gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
struct xdr_buf *buf, struct page **pages)
{
u32 err;
struct xdr_netobj hmac;
u8 *cksumkey;
u8 *ecptr;
struct crypto_sync_skcipher *cipher, *aux_cipher;
int blocksize;
struct crypto_ahash *ahash;
struct page **save_pages;
int nblocks, nbytes;
struct encryptor_desc desc;
u32 cbcbytes;
unsigned int usage;
unsigned int conflen;
if (kctx->initiate) {
cipher = kctx->initiator_enc;
aux_cipher = kctx->initiator_enc_aux;
cksumkey = kctx->initiator_integ;
usage = KG_USAGE_INITIATOR_SEAL;
ahash = kctx->initiator_integ;
} else {
cipher = kctx->acceptor_enc;
aux_cipher = kctx->acceptor_enc_aux;
cksumkey = kctx->acceptor_integ;
usage = KG_USAGE_ACCEPTOR_SEAL;
ahash = kctx->acceptor_integ;
}
blocksize = crypto_sync_skcipher_blocksize(cipher);
conflen = crypto_sync_skcipher_blocksize(cipher);
/* hide the gss token header and insert the confounder */
offset += GSS_KRB5_TOK_HDR_LEN;
if (xdr_extend_head(buf, offset, kctx->gk5e->conflen))
if (xdr_extend_head(buf, offset, conflen))
return GSS_S_FAILURE;
gss_krb5_make_confounder(buf->head[0].iov_base + offset, kctx->gk5e->conflen);
krb5_make_confounder(buf->head[0].iov_base + offset, conflen);
offset -= GSS_KRB5_TOK_HDR_LEN;
if (buf->tail[0].iov_base != NULL) {
@ -659,135 +823,60 @@ gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
save_pages = buf->pages;
buf->pages = pages;
err = make_checksum_v2(kctx, NULL, 0, buf,
offset + GSS_KRB5_TOK_HDR_LEN,
cksumkey, usage, &hmac);
err = gss_krb5_checksum(ahash, NULL, 0, buf,
offset + GSS_KRB5_TOK_HDR_LEN, &hmac);
buf->pages = save_pages;
if (err)
return GSS_S_FAILURE;
nbytes = buf->len - offset - GSS_KRB5_TOK_HDR_LEN;
nblocks = (nbytes + blocksize - 1) / blocksize;
cbcbytes = 0;
if (nblocks > 2)
cbcbytes = (nblocks - 2) * blocksize;
memset(desc.iv, 0, sizeof(desc.iv));
if (cbcbytes) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
desc.pos = offset + GSS_KRB5_TOK_HDR_LEN;
desc.fragno = 0;
desc.fraglen = 0;
desc.pages = pages;
desc.outbuf = buf;
desc.req = req;
skcipher_request_set_sync_tfm(req, aux_cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
sg_init_table(desc.infrags, 4);
sg_init_table(desc.outfrags, 4);
err = xdr_process_buf(buf, offset + GSS_KRB5_TOK_HDR_LEN,
cbcbytes, encryptor, &desc);
skcipher_request_zero(req);
if (err)
goto out_err;
}
/* Make sure IV carries forward from any CBC results. */
err = gss_krb5_cts_crypt(cipher, buf,
offset + GSS_KRB5_TOK_HDR_LEN + cbcbytes,
desc.iv, pages, 1);
if (err) {
err = GSS_S_FAILURE;
goto out_err;
}
err = krb5_cbc_cts_encrypt(cipher, aux_cipher,
offset + GSS_KRB5_TOK_HDR_LEN,
buf, pages, NULL, 0);
if (err)
return GSS_S_FAILURE;
/* Now update buf to account for HMAC */
buf->tail[0].iov_len += kctx->gk5e->cksumlength;
buf->len += kctx->gk5e->cksumlength;
out_err:
if (err)
err = GSS_S_FAILURE;
return err;
return GSS_S_COMPLETE;
}
u32
gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
{
struct xdr_buf subbuf;
u32 ret = 0;
u8 *cksum_key;
struct crypto_sync_skcipher *cipher, *aux_cipher;
struct crypto_ahash *ahash;
struct xdr_netobj our_hmac_obj;
u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
int nblocks, blocksize, cbcbytes;
struct decryptor_desc desc;
unsigned int usage;
struct xdr_buf subbuf;
u32 ret = 0;
if (kctx->initiate) {
cipher = kctx->acceptor_enc;
aux_cipher = kctx->acceptor_enc_aux;
cksum_key = kctx->acceptor_integ;
usage = KG_USAGE_ACCEPTOR_SEAL;
ahash = kctx->acceptor_integ;
} else {
cipher = kctx->initiator_enc;
aux_cipher = kctx->initiator_enc_aux;
cksum_key = kctx->initiator_integ;
usage = KG_USAGE_INITIATOR_SEAL;
ahash = kctx->initiator_integ;
}
blocksize = crypto_sync_skcipher_blocksize(cipher);
/* create a segment skipping the header and leaving out the checksum */
xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
(len - offset - GSS_KRB5_TOK_HDR_LEN -
kctx->gk5e->cksumlength));
nblocks = (subbuf.len + blocksize - 1) / blocksize;
cbcbytes = 0;
if (nblocks > 2)
cbcbytes = (nblocks - 2) * blocksize;
memset(desc.iv, 0, sizeof(desc.iv));
if (cbcbytes) {
SYNC_SKCIPHER_REQUEST_ON_STACK(req, aux_cipher);
desc.fragno = 0;
desc.fraglen = 0;
desc.req = req;
skcipher_request_set_sync_tfm(req, aux_cipher);
skcipher_request_set_callback(req, 0, NULL, NULL);
sg_init_table(desc.frags, 4);
ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc);
skcipher_request_zero(req);
if (ret)
goto out_err;
}
/* Make sure IV carries forward from any CBC results. */
ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0);
ret = krb5_cbc_cts_decrypt(cipher, aux_cipher, 0, &subbuf);
if (ret)
goto out_err;
/* Calculate our hmac over the plaintext data */
our_hmac_obj.len = sizeof(our_hmac);
our_hmac_obj.data = our_hmac;
ret = make_checksum_v2(kctx, NULL, 0, &subbuf, 0,
cksum_key, usage, &our_hmac_obj);
ret = gss_krb5_checksum(ahash, NULL, 0, &subbuf, 0, &our_hmac_obj);
if (ret)
goto out_err;
@ -801,10 +890,255 @@ gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
ret = GSS_S_BAD_SIG;
goto out_err;
}
*headskip = kctx->gk5e->conflen;
*headskip = crypto_sync_skcipher_blocksize(cipher);
*tailskip = kctx->gk5e->cksumlength;
out_err:
if (ret && ret != GSS_S_BAD_SIG)
ret = GSS_S_FAILURE;
return ret;
}
/**
* krb5_etm_checksum - Compute a MAC for a GSS Wrap token
* @cipher: an initialized cipher transform
* @tfm: an initialized hash transform
* @body: xdr_buf containing an RPC message (body.len is the message length)
* @body_offset: byte offset into @body to start checksumming
* @cksumout: OUT: a buffer to be filled in with the computed HMAC
*
* Usually expressed as H = HMAC(K, IV | ciphertext)[1..h].
*
* Caller provides the truncation length of the output token (h) in
* cksumout.len.
*
* Return values:
* %GSS_S_COMPLETE: Digest computed, @cksumout filled in
* %GSS_S_FAILURE: Call failed
*/
VISIBLE_IF_KUNIT
u32 krb5_etm_checksum(struct crypto_sync_skcipher *cipher,
struct crypto_ahash *tfm, const struct xdr_buf *body,
int body_offset, struct xdr_netobj *cksumout)
{
unsigned int ivsize = crypto_sync_skcipher_ivsize(cipher);
struct ahash_request *req;
struct scatterlist sg[1];
u8 *iv, *checksumdata;
int err = -ENOMEM;
checksumdata = kmalloc(crypto_ahash_digestsize(tfm), GFP_KERNEL);
if (!checksumdata)
return GSS_S_FAILURE;
/* For RPCSEC, the "initial cipher state" is always all zeroes. */
iv = kzalloc(ivsize, GFP_KERNEL);
if (!iv)
goto out_free_mem;
req = ahash_request_alloc(tfm, GFP_KERNEL);
if (!req)
goto out_free_mem;
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
err = crypto_ahash_init(req);
if (err)
goto out_free_ahash;
sg_init_one(sg, iv, ivsize);
ahash_request_set_crypt(req, sg, NULL, ivsize);
err = crypto_ahash_update(req);
if (err)
goto out_free_ahash;
err = xdr_process_buf(body, body_offset, body->len - body_offset,
checksummer, req);
if (err)
goto out_free_ahash;
ahash_request_set_crypt(req, NULL, checksumdata, 0);
err = crypto_ahash_final(req);
if (err)
goto out_free_ahash;
memcpy(cksumout->data, checksumdata, cksumout->len);
out_free_ahash:
ahash_request_free(req);
out_free_mem:
kfree(iv);
kfree_sensitive(checksumdata);
return err ? GSS_S_FAILURE : GSS_S_COMPLETE;
}
EXPORT_SYMBOL_IF_KUNIT(krb5_etm_checksum);
/**
* krb5_etm_encrypt - Encrypt using the RFC 8009 rules
* @kctx: Kerberos context
* @offset: starting offset of the payload, in bytes
* @buf: OUT: send buffer to contain the encrypted payload
* @pages: plaintext payload
*
* The main difference with aes_encrypt is that "The HMAC is
* calculated over the cipher state concatenated with the AES
* output, instead of being calculated over the confounder and
* plaintext. This allows the message receiver to verify the
* integrity of the message before decrypting the message."
*
* RFC 8009 Section 5:
*
* encryption function: as follows, where E() is AES encryption in
* CBC-CS3 mode, and h is the size of truncated HMAC (128 bits or
* 192 bits as described above).
*
* N = random value of length 128 bits (the AES block size)
* IV = cipher state
* C = E(Ke, N | plaintext, IV)
* H = HMAC(Ki, IV | C)
* ciphertext = C | H[1..h]
*
* This encryption formula provides AEAD EtM with key separation.
*
* Return values:
* %GSS_S_COMPLETE: Encryption successful
* %GSS_S_FAILURE: Encryption failed
*/
u32
krb5_etm_encrypt(struct krb5_ctx *kctx, u32 offset,
struct xdr_buf *buf, struct page **pages)
{
struct crypto_sync_skcipher *cipher, *aux_cipher;
struct crypto_ahash *ahash;
struct xdr_netobj hmac;
unsigned int conflen;
u8 *ecptr;
u32 err;
if (kctx->initiate) {
cipher = kctx->initiator_enc;
aux_cipher = kctx->initiator_enc_aux;
ahash = kctx->initiator_integ;
} else {
cipher = kctx->acceptor_enc;
aux_cipher = kctx->acceptor_enc_aux;
ahash = kctx->acceptor_integ;
}
conflen = crypto_sync_skcipher_blocksize(cipher);
offset += GSS_KRB5_TOK_HDR_LEN;
if (xdr_extend_head(buf, offset, conflen))
return GSS_S_FAILURE;
krb5_make_confounder(buf->head[0].iov_base + offset, conflen);
offset -= GSS_KRB5_TOK_HDR_LEN;
if (buf->tail[0].iov_base) {
ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len;
} else {
buf->tail[0].iov_base = buf->head[0].iov_base
+ buf->head[0].iov_len;
buf->tail[0].iov_len = 0;
ecptr = buf->tail[0].iov_base;
}
memcpy(ecptr, buf->head[0].iov_base + offset, GSS_KRB5_TOK_HDR_LEN);
buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN;
buf->len += GSS_KRB5_TOK_HDR_LEN;
err = krb5_cbc_cts_encrypt(cipher, aux_cipher,
offset + GSS_KRB5_TOK_HDR_LEN,
buf, pages, NULL, 0);
if (err)
return GSS_S_FAILURE;
hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len;
hmac.len = kctx->gk5e->cksumlength;
err = krb5_etm_checksum(cipher, ahash,
buf, offset + GSS_KRB5_TOK_HDR_LEN, &hmac);
if (err)
goto out_err;
buf->tail[0].iov_len += kctx->gk5e->cksumlength;
buf->len += kctx->gk5e->cksumlength;
return GSS_S_COMPLETE;
out_err:
return GSS_S_FAILURE;
}
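A standalone sketch of the encrypt-then-MAC ordering with OpenSSL (assumed available): AES-128-CBC stands in for CBC-CS3, the input is pre-aligned to the block size, and all keys are placeholders, so only the "compute C, then H = HMAC(Ki, IV | C)" shape carries over:

#include <openssl/evp.h>
#include <openssl/hmac.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char Ke[16] = { 0 }, Ki[16] = { 1 }, iv[16] = { 0 };
	unsigned char pt[32], ct[48], hin[16 + 48], mac[EVP_MAX_MD_SIZE];
	unsigned int maclen;
	int n = 0, fin = 0;
	EVP_CIPHER_CTX *c = EVP_CIPHER_CTX_new();

	memset(pt, 'x', sizeof(pt));	/* confounder | plaintext, block-aligned */
	EVP_EncryptInit_ex(c, EVP_aes_128_cbc(), NULL, Ke, iv);
	EVP_CIPHER_CTX_set_padding(c, 0);
	EVP_EncryptUpdate(c, ct, &n, pt, sizeof(pt));
	EVP_EncryptFinal_ex(c, ct + n, &fin);
	EVP_CIPHER_CTX_free(c);

	memcpy(hin, iv, sizeof(iv));		/* H = HMAC(Ki, IV | C) */
	memcpy(hin + sizeof(iv), ct, n + fin);
	HMAC(EVP_sha256(), Ki, sizeof(Ki), hin, sizeof(iv) + n + fin, mac, &maclen);
	printf("C = %d octets; append H[1..16] to form the token body\n", n + fin);
	return 0;
}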
/**
* krb5_etm_decrypt - Decrypt using the RFC 8009 rules
* @kctx: Kerberos context
* @offset: starting offset of the ciphertext, in bytes
* @len: byte offset in @buf just past the end of the Wrap token
* @buf: xdr_buf containing the Wrap token to decrypt
* @headskip: OUT: the enctype's confounder length, in octets
* @tailskip: OUT: the enctype's HMAC length, in octets
*
* RFC 8009 Section 5:
*
* decryption function: as follows, where D() is AES decryption in
* CBC-CS3 mode, and h is the size of truncated HMAC.
*
* (C, H) = ciphertext
* (Note: H is the last h bits of the ciphertext.)
* IV = cipher state
* if H != HMAC(Ki, IV | C)[1..h]
* stop, report error
* (N, P) = D(Ke, C, IV)
*
* Return values:
* %GSS_S_COMPLETE: Decryption successful
* %GSS_S_BAD_SIG: computed HMAC != received HMAC
* %GSS_S_FAILURE: Decryption failed
*/
u32
krb5_etm_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
struct xdr_buf *buf, u32 *headskip, u32 *tailskip)
{
struct crypto_sync_skcipher *cipher, *aux_cipher;
u8 our_hmac[GSS_KRB5_MAX_CKSUM_LEN];
u8 pkt_hmac[GSS_KRB5_MAX_CKSUM_LEN];
struct xdr_netobj our_hmac_obj;
struct crypto_ahash *ahash;
struct xdr_buf subbuf;
u32 ret = 0;
if (kctx->initiate) {
cipher = kctx->acceptor_enc;
aux_cipher = kctx->acceptor_enc_aux;
ahash = kctx->acceptor_integ;
} else {
cipher = kctx->initiator_enc;
aux_cipher = kctx->initiator_enc_aux;
ahash = kctx->initiator_integ;
}
/* Extract the ciphertext into @subbuf. */
xdr_buf_subsegment(buf, &subbuf, offset + GSS_KRB5_TOK_HDR_LEN,
(len - offset - GSS_KRB5_TOK_HDR_LEN -
kctx->gk5e->cksumlength));
our_hmac_obj.data = our_hmac;
our_hmac_obj.len = kctx->gk5e->cksumlength;
ret = krb5_etm_checksum(cipher, ahash, &subbuf, 0, &our_hmac_obj);
if (ret)
goto out_err;
ret = read_bytes_from_xdr_buf(buf, len - kctx->gk5e->cksumlength,
pkt_hmac, kctx->gk5e->cksumlength);
if (ret)
goto out_err;
if (crypto_memneq(pkt_hmac, our_hmac, kctx->gk5e->cksumlength) != 0) {
ret = GSS_S_BAD_SIG;
goto out_err;
}
ret = krb5_cbc_cts_decrypt(cipher, aux_cipher, 0, &subbuf);
if (ret) {
ret = GSS_S_FAILURE;
goto out_err;
}
*headskip = crypto_sync_skcipher_blocksize(cipher);
*tailskip = kctx->gk5e->cksumlength;
return GSS_S_COMPLETE;
out_err:
if (ret != GSS_S_BAD_SIG)
ret = GSS_S_FAILURE;
return ret;
}
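Note that the receiver verifies the HMAC with crypto_memneq() before it decrypts anything, and that comparison must not leak timing. A userspace sketch of the constant-time idea (not the kernel's implementation):

#include <stddef.h>
#include <stdio.h>

static int ct_memneq(const unsigned char *a, const unsigned char *b, size_t n)
{
	unsigned char diff = 0;

	/* Accumulate differences instead of returning early, so the
	 * runtime does not reveal how many leading bytes matched.
	 */
	for (size_t i = 0; i < n; i++)
		diff |= a[i] ^ b[i];
	return diff != 0;
}

int main(void)
{
	unsigned char x[12] = { 1, 2, 3 }, y[12] = { 1, 2, 4 };

	printf("%s\n", ct_memneq(x, y, sizeof(x)) ? "mismatch" : "match");
	return 0;
}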

View File

@ -0,0 +1,232 @@
/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
/*
* SunRPC GSS Kerberos 5 mechanism internal definitions
*
* Copyright (c) 2022 Oracle and/or its affiliates.
*/
#ifndef _NET_SUNRPC_AUTH_GSS_KRB5_INTERNAL_H
#define _NET_SUNRPC_AUTH_GSS_KRB5_INTERNAL_H
/*
* The RFCs often specify payload lengths in bits. This helper
* converts a specified bit-length to the number of octets/bytes.
*/
#define BITS2OCTETS(x) ((x) / 8)
struct krb5_ctx;
struct gss_krb5_enctype {
const u32 etype; /* encryption (key) type */
const u32 ctype; /* checksum type */
const char *name; /* "friendly" name */
const char *encrypt_name; /* crypto encrypt name */
const char *aux_cipher; /* aux encrypt cipher name */
const char *cksum_name; /* crypto checksum name */
const u16 signalg; /* signing algorithm */
const u16 sealalg; /* sealing algorithm */
const u32 cksumlength; /* checksum length */
const u32 keyed_cksum; /* is it a keyed cksum? */
const u32 keybytes; /* raw key len, in bytes */
const u32 keylength; /* protocol key length, in octets */
const u32 Kc_length; /* checksum subkey length, in octets */
const u32 Ke_length; /* encryption subkey length, in octets */
const u32 Ki_length; /* integrity subkey length, in octets */
int (*import_ctx)(struct krb5_ctx *ctx, gfp_t gfp_mask);
int (*derive_key)(const struct gss_krb5_enctype *gk5e,
const struct xdr_netobj *in,
struct xdr_netobj *out,
const struct xdr_netobj *label,
gfp_t gfp_mask);
u32 (*encrypt)(struct krb5_ctx *kctx, u32 offset,
struct xdr_buf *buf, struct page **pages);
u32 (*decrypt)(struct krb5_ctx *kctx, u32 offset, u32 len,
struct xdr_buf *buf, u32 *headskip, u32 *tailskip);
u32 (*get_mic)(struct krb5_ctx *kctx, struct xdr_buf *text,
struct xdr_netobj *token);
u32 (*verify_mic)(struct krb5_ctx *kctx, struct xdr_buf *message_buffer,
struct xdr_netobj *read_token);
u32 (*wrap)(struct krb5_ctx *kctx, int offset,
struct xdr_buf *buf, struct page **pages);
u32 (*unwrap)(struct krb5_ctx *kctx, int offset, int len,
struct xdr_buf *buf, unsigned int *slack,
unsigned int *align);
};
/* krb5_ctx flags definitions */
#define KRB5_CTX_FLAG_INITIATOR 0x00000001
#define KRB5_CTX_FLAG_ACCEPTOR_SUBKEY 0x00000004
struct krb5_ctx {
int initiate; /* 1 = initiating, 0 = accepting */
u32 enctype;
u32 flags;
const struct gss_krb5_enctype *gk5e; /* enctype-specific info */
struct crypto_sync_skcipher *enc;
struct crypto_sync_skcipher *seq;
struct crypto_sync_skcipher *acceptor_enc;
struct crypto_sync_skcipher *initiator_enc;
struct crypto_sync_skcipher *acceptor_enc_aux;
struct crypto_sync_skcipher *initiator_enc_aux;
struct crypto_ahash *acceptor_sign;
struct crypto_ahash *initiator_sign;
struct crypto_ahash *initiator_integ;
struct crypto_ahash *acceptor_integ;
u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
u8 cksum[GSS_KRB5_MAX_KEYLEN];
atomic_t seq_send;
atomic64_t seq_send64;
time64_t endtime;
struct xdr_netobj mech_used;
};
/*
* GSS Kerberos 5 mechanism Per-Message calls.
*/
u32 gss_krb5_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
struct xdr_netobj *token);
u32 gss_krb5_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
struct xdr_netobj *token);
u32 gss_krb5_verify_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *message_buffer,
struct xdr_netobj *read_token);
u32 gss_krb5_verify_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *message_buffer,
struct xdr_netobj *read_token);
u32 gss_krb5_wrap_v1(struct krb5_ctx *kctx, int offset,
struct xdr_buf *buf, struct page **pages);
u32 gss_krb5_wrap_v2(struct krb5_ctx *kctx, int offset,
struct xdr_buf *buf, struct page **pages);
u32 gss_krb5_unwrap_v1(struct krb5_ctx *kctx, int offset, int len,
struct xdr_buf *buf, unsigned int *slack,
unsigned int *align);
u32 gss_krb5_unwrap_v2(struct krb5_ctx *kctx, int offset, int len,
struct xdr_buf *buf, unsigned int *slack,
unsigned int *align);
/*
* Implementation internal functions
*/
/* Key Derivation Functions */
int krb5_derive_key_v1(const struct gss_krb5_enctype *gk5e,
const struct xdr_netobj *inkey,
struct xdr_netobj *outkey,
const struct xdr_netobj *label,
gfp_t gfp_mask);
int krb5_derive_key_v2(const struct gss_krb5_enctype *gk5e,
const struct xdr_netobj *inkey,
struct xdr_netobj *outkey,
const struct xdr_netobj *label,
gfp_t gfp_mask);
int krb5_kdf_hmac_sha2(const struct gss_krb5_enctype *gk5e,
const struct xdr_netobj *inkey,
struct xdr_netobj *outkey,
const struct xdr_netobj *in_constant,
gfp_t gfp_mask);
int krb5_kdf_feedback_cmac(const struct gss_krb5_enctype *gk5e,
const struct xdr_netobj *inkey,
struct xdr_netobj *outkey,
const struct xdr_netobj *in_constant,
gfp_t gfp_mask);
/**
* krb5_derive_key - Derive a subkey from a protocol key
* @kctx: Kerberos 5 context
* @inkey: base protocol key
* @outkey: OUT: derived key
* @usage: key usage value
* @seed: key usage seed (one octet)
* @gfp_mask: memory allocation control flags
*
* Caller sets @outkey->len to the desired length of the derived key.
*
* On success, returns 0 and fills in @outkey. A negative errno value
* is returned on failure.
*/
static inline int krb5_derive_key(struct krb5_ctx *kctx,
const struct xdr_netobj *inkey,
struct xdr_netobj *outkey,
u32 usage, u8 seed, gfp_t gfp_mask)
{
const struct gss_krb5_enctype *gk5e = kctx->gk5e;
u8 label_data[GSS_KRB5_K5CLENGTH];
struct xdr_netobj label = {
.len = sizeof(label_data),
.data = label_data,
};
__be32 *p = (__be32 *)label_data;
*p = cpu_to_be32(usage);
label_data[4] = seed;
return gk5e->derive_key(gk5e, inkey, outkey, &label, gfp_mask);
}
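So the derivation label is always five octets: the usage value as a 4-octet big-endian word, followed by the one-octet seed (RFC 3961 uses 0xAA for encryption, 0x55 for checksum, and 0x99 for integrity subkeys). A sketch that prints such a label (the usage value 24 is the RFC 4121 initiator-seal number, shown only as an example):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t label[5];
	uint32_t usage = htonl(24);	/* example usage value, big-endian */

	memcpy(label, &usage, 4);
	label[4] = 0xAA;		/* seed octet: RFC 3961 encryption subkey */
	for (unsigned int i = 0; i < sizeof(label); i++)
		printf("%02x ", label[i]);
	printf("\n");
	return 0;
}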
s32 krb5_make_seq_num(struct krb5_ctx *kctx, struct crypto_sync_skcipher *key,
int direction, u32 seqnum, unsigned char *cksum,
unsigned char *buf);
s32 krb5_get_seq_num(struct krb5_ctx *kctx, unsigned char *cksum,
unsigned char *buf, int *direction, u32 *seqnum);
void krb5_make_confounder(u8 *p, int conflen);
u32 make_checksum(struct krb5_ctx *kctx, char *header, int hdrlen,
struct xdr_buf *body, int body_offset, u8 *cksumkey,
unsigned int usage, struct xdr_netobj *cksumout);
u32 gss_krb5_checksum(struct crypto_ahash *tfm, char *header, int hdrlen,
const struct xdr_buf *body, int body_offset,
struct xdr_netobj *cksumout);
u32 krb5_encrypt(struct crypto_sync_skcipher *key, void *iv, void *in,
void *out, int length);
u32 krb5_decrypt(struct crypto_sync_skcipher *key, void *iv, void *in,
void *out, int length);
int xdr_extend_head(struct xdr_buf *buf, unsigned int base,
unsigned int shiftlen);
int gss_encrypt_xdr_buf(struct crypto_sync_skcipher *tfm,
struct xdr_buf *outbuf, int offset,
struct page **pages);
int gss_decrypt_xdr_buf(struct crypto_sync_skcipher *tfm,
struct xdr_buf *inbuf, int offset);
u32 gss_krb5_aes_encrypt(struct krb5_ctx *kctx, u32 offset,
struct xdr_buf *buf, struct page **pages);
u32 gss_krb5_aes_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
struct xdr_buf *buf, u32 *plainoffset, u32 *plainlen);
u32 krb5_etm_encrypt(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf,
struct page **pages);
u32 krb5_etm_decrypt(struct krb5_ctx *kctx, u32 offset, u32 len,
struct xdr_buf *buf, u32 *headskip, u32 *tailskip);
#if IS_ENABLED(CONFIG_KUNIT)
void krb5_nfold(u32 inbits, const u8 *in, u32 outbits, u8 *out);
const struct gss_krb5_enctype *gss_krb5_lookup_enctype(u32 etype);
int krb5_cbc_cts_encrypt(struct crypto_sync_skcipher *cts_tfm,
struct crypto_sync_skcipher *cbc_tfm, u32 offset,
struct xdr_buf *buf, struct page **pages,
u8 *iv, unsigned int ivsize);
int krb5_cbc_cts_decrypt(struct crypto_sync_skcipher *cts_tfm,
struct crypto_sync_skcipher *cbc_tfm,
u32 offset, struct xdr_buf *buf);
u32 krb5_etm_checksum(struct crypto_sync_skcipher *cipher,
struct crypto_ahash *tfm, const struct xdr_buf *body,
int body_offset, struct xdr_netobj *cksumout);
#endif
#endif /* _NET_SUNRPC_AUTH_GSS_KRB5_INTERNAL_H */

View File

@ -60,18 +60,27 @@
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>
#include <linux/lcm.h>
#include <crypto/hash.h>
#include <kunit/visibility.h>
#include "gss_krb5_internal.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
/*
/**
* krb5_nfold - n-fold function
* @inbits: number of bits in @in
* @in: buffer containing input to fold
* @outbits: number of bits in the output buffer
* @out: buffer to hold the result
*
* This is the n-fold function as described in rfc3961, sec 5.1
* Taken from MIT Kerberos and modified.
*/
static void krb5_nfold(u32 inbits, const u8 *in,
u32 outbits, u8 *out)
VISIBLE_IF_KUNIT
void krb5_nfold(u32 inbits, const u8 *in, u32 outbits, u8 *out)
{
unsigned long ulcm;
int byte, i, msbit;
@ -132,40 +141,36 @@ static void krb5_nfold(u32 inbits, const u8 *in,
}
}
}
EXPORT_SYMBOL_IF_KUNIT(krb5_nfold);
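The folding length is driven by a least common multiple: the input is conceptually repeated (with a 13-bit rotation between copies) until it spans lcm(inbits, outbits) bits, then summed down into the output. A sketch of that repetition count (the 64-to-168-bit case matches an RFC 3961 test vector):

#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	unsigned long inbits = 64, outbits = 168;	/* e.g. folding to a des3 key */
	unsigned long lcm = inbits / gcd(inbits, outbits) * outbits;

	printf("fold %lu rotated copies of the input\n", lcm / inbits);
	return 0;
}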
/*
* This is the DK (derive_key) function as described in rfc3961, sec 5.1
* Taken from MIT Kerberos and modified.
*/
u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
const struct xdr_netobj *inkey,
struct xdr_netobj *outkey,
const struct xdr_netobj *in_constant,
gfp_t gfp_mask)
static int krb5_DK(const struct gss_krb5_enctype *gk5e,
const struct xdr_netobj *inkey, u8 *rawkey,
const struct xdr_netobj *in_constant, gfp_t gfp_mask)
{
size_t blocksize, keybytes, keylength, n;
unsigned char *inblockdata, *outblockdata, *rawkey;
unsigned char *inblockdata, *outblockdata;
struct xdr_netobj inblock, outblock;
struct crypto_sync_skcipher *cipher;
u32 ret = EINVAL;
int ret = -EINVAL;
blocksize = gk5e->blocksize;
keybytes = gk5e->keybytes;
keylength = gk5e->keylength;
if ((inkey->len != keylength) || (outkey->len != keylength))
if (inkey->len != keylength)
goto err_return;
cipher = crypto_alloc_sync_skcipher(gk5e->encrypt_name, 0, 0);
if (IS_ERR(cipher))
goto err_return;
blocksize = crypto_sync_skcipher_blocksize(cipher);
if (crypto_sync_skcipher_setkey(cipher, inkey->data, inkey->len))
goto err_return;
/* allocate and set up buffers */
ret = ENOMEM;
ret = -ENOMEM;
inblockdata = kmalloc(blocksize, gfp_mask);
if (inblockdata == NULL)
goto err_free_cipher;
@ -174,10 +179,6 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
if (outblockdata == NULL)
goto err_free_in;
rawkey = kmalloc(keybytes, gfp_mask);
if (rawkey == NULL)
goto err_free_out;
inblock.data = (char *) inblockdata;
inblock.len = blocksize;
@ -197,8 +198,8 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
n = 0;
while (n < keybytes) {
(*(gk5e->encrypt))(cipher, NULL, inblock.data,
outblock.data, inblock.len);
krb5_encrypt(cipher, NULL, inblock.data, outblock.data,
inblock.len);
if ((keybytes - n) <= outblock.len) {
memcpy(rawkey + n, outblock.data, (keybytes - n));
@ -210,26 +211,8 @@ u32 krb5_derive_key(const struct gss_krb5_enctype *gk5e,
n += outblock.len;
}
/* postprocess the key */
inblock.data = (char *) rawkey;
inblock.len = keybytes;
BUG_ON(gk5e->mk_key == NULL);
ret = (*(gk5e->mk_key))(gk5e, &inblock, outkey);
if (ret) {
dprintk("%s: got %d from mk_key function for '%s'\n",
__func__, ret, gk5e->encrypt_name);
goto err_free_raw;
}
/* clean memory, free resources and exit */
ret = 0;
err_free_raw:
kfree_sensitive(rawkey);
err_free_out:
kfree_sensitive(outblockdata);
err_free_in:
kfree_sensitive(inblockdata);
@ -252,15 +235,11 @@ static void mit_des_fixup_key_parity(u8 key[8])
}
}
/*
* This is the des3 key derivation postprocess function
*/
u32 gss_krb5_des3_make_key(const struct gss_krb5_enctype *gk5e,
struct xdr_netobj *randombits,
struct xdr_netobj *key)
static int krb5_random_to_key_v1(const struct gss_krb5_enctype *gk5e,
struct xdr_netobj *randombits,
struct xdr_netobj *key)
{
int i;
u32 ret = EINVAL;
int i, ret = -EINVAL;
if (key->len != 24) {
dprintk("%s: key->len is %d\n", __func__, key->len);
@ -292,14 +271,49 @@ err_out:
return ret;
}
/*
* This is the aes key derivation postprocess function
/**
* krb5_derive_key_v1 - Derive a subkey for an RFC 3961 enctype
* @gk5e: Kerberos 5 enctype profile
* @inkey: base protocol key
* @outkey: OUT: derived key
* @label: subkey usage label
* @gfp_mask: memory allocation control flags
*
* Caller sets @outkey->len to the desired length of the derived key.
*
* On success, returns 0 and fills in @outkey. A negative errno value
* is returned on failure.
*/
u32 gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
struct xdr_netobj *randombits,
struct xdr_netobj *key)
int krb5_derive_key_v1(const struct gss_krb5_enctype *gk5e,
const struct xdr_netobj *inkey,
struct xdr_netobj *outkey,
const struct xdr_netobj *label,
gfp_t gfp_mask)
{
u32 ret = EINVAL;
struct xdr_netobj inblock;
int ret;
inblock.len = gk5e->keybytes;
inblock.data = kmalloc(inblock.len, gfp_mask);
if (!inblock.data)
return -ENOMEM;
ret = krb5_DK(gk5e, inkey, inblock.data, label, gfp_mask);
if (!ret)
ret = krb5_random_to_key_v1(gk5e, &inblock, outkey);
kfree_sensitive(inblock.data);
return ret;
}
/*
* This is the identity function, with some sanity checking.
*/
static int krb5_random_to_key_v2(const struct gss_krb5_enctype *gk5e,
struct xdr_netobj *randombits,
struct xdr_netobj *key)
{
int ret = -EINVAL;
if (key->len != 16 && key->len != 32) {
dprintk("%s: key->len is %d\n", __func__, key->len);
@ -320,3 +334,297 @@ u32 gss_krb5_aes_make_key(const struct gss_krb5_enctype *gk5e,
err_out:
return ret;
}
/**
* krb5_derive_key_v2 - Derive a subkey for an RFC 3962 enctype
* @gk5e: Kerberos 5 enctype profile
* @inkey: base protocol key
* @outkey: OUT: derived key
* @label: subkey usage label
* @gfp_mask: memory allocation control flags
*
* Caller sets @outkey->len to the desired length of the derived key.
*
* On success, returns 0 and fills in @outkey. A negative errno value
* is returned on failure.
*/
int krb5_derive_key_v2(const struct gss_krb5_enctype *gk5e,
const struct xdr_netobj *inkey,
struct xdr_netobj *outkey,
const struct xdr_netobj *label,
gfp_t gfp_mask)
{
struct xdr_netobj inblock;
int ret;
inblock.len = gk5e->keybytes;
inblock.data = kmalloc(inblock.len, gfp_mask);
if (!inblock.data)
return -ENOMEM;
ret = krb5_DK(gk5e, inkey, inblock.data, label, gfp_mask);
if (!ret)
ret = krb5_random_to_key_v2(gk5e, &inblock, outkey);
kfree_sensitive(inblock.data);
return ret;
}
/*
* K(i) = CMAC(key, K(i-1) | i | constant | 0x00 | k)
*
* i: A block counter is used with a length of 4 bytes, represented
* in big-endian order.
*
* constant: The label input to the KDF is the usage constant supplied
* to the key derivation function
*
* k: The length of the output key in bits, represented as a 4-byte
* string in big-endian order.
*
* Caller fills in K(i-1) in @step, and receives the result K(i)
* in the same buffer.
*/
static int
krb5_cmac_Ki(struct crypto_shash *tfm, const struct xdr_netobj *constant,
u32 outlen, u32 count, struct xdr_netobj *step)
{
__be32 k = cpu_to_be32(outlen * 8);
SHASH_DESC_ON_STACK(desc, tfm);
__be32 i = cpu_to_be32(count);
u8 zero = 0;
int ret;
desc->tfm = tfm;
ret = crypto_shash_init(desc);
if (ret)
goto out_err;
ret = crypto_shash_update(desc, step->data, step->len);
if (ret)
goto out_err;
ret = crypto_shash_update(desc, (u8 *)&i, sizeof(i));
if (ret)
goto out_err;
ret = crypto_shash_update(desc, constant->data, constant->len);
if (ret)
goto out_err;
ret = crypto_shash_update(desc, &zero, sizeof(zero));
if (ret)
goto out_err;
ret = crypto_shash_update(desc, (u8 *)&k, sizeof(k));
if (ret)
goto out_err;
ret = crypto_shash_final(desc, step->data);
if (ret)
goto out_err;
out_err:
shash_desc_zero(desc);
return ret;
}
/**
* krb5_kdf_feedback_cmac - Derive a subkey for a Camellia/CMAC-based enctype
* @gk5e: Kerberos 5 enctype parameters
* @inkey: base protocol key
* @outkey: OUT: derived key
* @constant: subkey usage label
* @gfp_mask: memory allocation control flags
*
* RFC 6803 Section 3:
*
* "We use a key derivation function from the family specified in
* [SP800-108], Section 5.2, 'KDF in Feedback Mode'."
*
* n = ceiling(k / 128)
* K(0) = zeros
* K(i) = CMAC(key, K(i-1) | i | constant | 0x00 | k)
* DR(key, constant) = k-truncate(K(1) | K(2) | ... | K(n))
* KDF-FEEDBACK-CMAC(key, constant) = random-to-key(DR(key, constant))
*
* Caller sets @outkey->len to the desired length of the derived key (k).
*
* On success, returns 0 and fills in @outkey. A negative errno value
* is returned on failure.
*/
int
krb5_kdf_feedback_cmac(const struct gss_krb5_enctype *gk5e,
const struct xdr_netobj *inkey,
struct xdr_netobj *outkey,
const struct xdr_netobj *constant,
gfp_t gfp_mask)
{
struct xdr_netobj step = { .data = NULL };
struct xdr_netobj DR = { .data = NULL };
unsigned int blocksize, offset;
struct crypto_shash *tfm;
int n, count, ret;
/*
* This implementation assumes the CMAC used for an enctype's
* key derivation is the same as the CMAC used for its
* checksumming. This happens to be true for enctypes that
* are currently supported by this implementation.
*/
tfm = crypto_alloc_shash(gk5e->cksum_name, 0, 0);
if (IS_ERR(tfm)) {
ret = PTR_ERR(tfm);
goto out;
}
ret = crypto_shash_setkey(tfm, inkey->data, inkey->len);
if (ret)
goto out_free_tfm;
blocksize = crypto_shash_digestsize(tfm);
n = (outkey->len + blocksize - 1) / blocksize;
/* K(0) is all zeroes */
ret = -ENOMEM;
step.len = blocksize;
step.data = kzalloc(step.len, gfp_mask);
if (!step.data)
goto out_free_tfm;
DR.len = blocksize * n;
DR.data = kmalloc(DR.len, gfp_mask);
if (!DR.data)
goto out_free_tfm;
/* XXX: Does not handle partial-block key sizes */
for (offset = 0, count = 1; count <= n; count++) {
ret = krb5_cmac_Ki(tfm, constant, outkey->len, count, &step);
if (ret)
goto out_free_tfm;
memcpy(DR.data + offset, step.data, blocksize);
offset += blocksize;
}
/* k-truncate and random-to-key */
memcpy(outkey->data, DR.data, outkey->len);
ret = 0;
out_free_tfm:
crypto_free_shash(tfm);
out:
kfree_sensitive(step.data);
kfree_sensitive(DR.data);
return ret;
}
/*
* K1 = HMAC-SHA(key, 0x00000001 | label | 0x00 | k)
*
* key: The source of entropy from which subsequent keys are derived.
*
* label: An octet string describing the intended usage of the
* derived key.
*
* k: Length in bits of the key to be outputted, expressed in
* big-endian binary representation in 4 bytes.
*/
static int
krb5_hmac_K1(struct crypto_shash *tfm, const struct xdr_netobj *label,
u32 outlen, struct xdr_netobj *K1)
{
__be32 k = cpu_to_be32(outlen * 8);
SHASH_DESC_ON_STACK(desc, tfm);
__be32 one = cpu_to_be32(1);
u8 zero = 0;
int ret;
desc->tfm = tfm;
ret = crypto_shash_init(desc);
if (ret)
goto out_err;
ret = crypto_shash_update(desc, (u8 *)&one, sizeof(one));
if (ret)
goto out_err;
ret = crypto_shash_update(desc, label->data, label->len);
if (ret)
goto out_err;
ret = crypto_shash_update(desc, &zero, sizeof(zero));
if (ret)
goto out_err;
ret = crypto_shash_update(desc, (u8 *)&k, sizeof(k));
if (ret)
goto out_err;
ret = crypto_shash_final(desc, K1->data);
if (ret)
goto out_err;
out_err:
shash_desc_zero(desc);
return ret;
}
/**
* krb5_kdf_hmac_sha2 - Derive a subkey for an AES/SHA2-based enctype
* @gk5e: Kerberos 5 enctype policy parameters
* @inkey: base protocol key
* @outkey: OUT: derived key
* @label: subkey usage label
* @gfp_mask: memory allocation control flags
*
* RFC 8009 Section 3:
*
* "We use a key derivation function from Section 5.1 of [SP800-108],
* which uses the HMAC algorithm as the PRF."
*
* function KDF-HMAC-SHA2(key, label, [context,] k):
* k-truncate(K1)
*
* Caller sets @outkey->len to the desired length of the derived key.
*
* On success, returns 0 and fills in @outkey. A negative errno value
* is returned on failure.
*/
int
krb5_kdf_hmac_sha2(const struct gss_krb5_enctype *gk5e,
const struct xdr_netobj *inkey,
struct xdr_netobj *outkey,
const struct xdr_netobj *label,
gfp_t gfp_mask)
{
struct crypto_shash *tfm;
struct xdr_netobj K1 = {
.data = NULL,
};
int ret;
/*
* This implementation assumes the HMAC used for an enctype's
* key derivation is the same as the HMAC used for its
* checksumming. This happens to be true for enctypes that
* are currently supported by this implementation.
*/
tfm = crypto_alloc_shash(gk5e->cksum_name, 0, 0);
if (IS_ERR(tfm)) {
ret = PTR_ERR(tfm);
goto out;
}
ret = crypto_shash_setkey(tfm, inkey->data, inkey->len);
if (ret)
goto out_free_tfm;
K1.len = crypto_shash_digestsize(tfm);
K1.data = kmalloc(K1.len, gfp_mask);
if (!K1.data) {
ret = -ENOMEM;
goto out_free_tfm;
}
ret = krb5_hmac_K1(tfm, label, outkey->len, &K1);
if (ret)
goto out_free_tfm;
/* k-truncate and random-to-key */
memcpy(outkey->data, K1.data, outkey->len);
out_free_tfm:
kfree_sensitive(K1.data);
crypto_free_shash(tfm);
out:
return ret;
}
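
The RFC 8009 derivation above is a single HMAC invocation followed by truncation, so it can be cross-checked with Python's standard library alone. An illustrative sketch, not kernel code:

# Illustrative model of KDF-HMAC-SHA2 (RFC 8009); not kernel code.
import hashlib
import hmac

def kdf_hmac_sha2(key: bytes, label: bytes, k: int,
                  digest=hashlib.sha256) -> bytes:
    """K1 = HMAC-SHA(key, 0x00000001 | label | 0x00 | k-in-bits)."""
    msg = ((1).to_bytes(4, "big") + label + b"\x00" +
           (k * 8).to_bytes(4, "big"))
    return hmac.new(key, msg, digest).digest()[:k]   # k-truncate

For aes256-cts-hmac-sha384-192, pass digest=hashlib.sha384 together with the Ke/Kc/Ki lengths listed in the enctype table below.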


@ -19,18 +19,27 @@
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/sunrpc/xdr.h>
#include <kunit/visibility.h>
#include "auth_gss_internal.h"
#include "gss_krb5_internal.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
static struct gss_api_mech gss_kerberos_mech;
#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED)
static int gss_krb5_import_ctx_des(struct krb5_ctx *ctx, gfp_t gfp_mask);
static int gss_krb5_import_ctx_v1(struct krb5_ctx *ctx, gfp_t gfp_mask);
#endif
#if defined(CONFIG_RPCSEC_GSS_KRB5_CRYPTOSYSTEM)
static int gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask);
#endif
static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
#if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_DES)
/*
* DES (All DES enctypes are mapped to the same gss functionality)
*/
@ -40,19 +49,18 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
.name = "des-cbc-crc",
.encrypt_name = "cbc(des)",
.cksum_name = "md5",
.import_ctx = gss_krb5_import_ctx_des,
.get_mic = gss_krb5_get_mic_v1,
.verify_mic = gss_krb5_verify_mic_v1,
.wrap = gss_krb5_wrap_v1,
.unwrap = gss_krb5_unwrap_v1,
.signalg = SGN_ALG_DES_MAC_MD5,
.sealalg = SEAL_ALG_DES,
.keybytes = 7,
.keylength = 8,
.cksumlength = 8,
.keyed_cksum = 0,
},
/*
* 3DES
*/
@ -62,100 +70,291 @@ static const struct gss_krb5_enctype supported_gss_krb5_enctypes[] = {
.name = "des3-hmac-sha1",
.encrypt_name = "cbc(des3_ede)",
.cksum_name = "hmac(sha1)",
.import_ctx = gss_krb5_import_ctx_v1,
.derive_key = krb5_derive_key_v1,
.get_mic = gss_krb5_get_mic_v1,
.verify_mic = gss_krb5_verify_mic_v1,
.wrap = gss_krb5_wrap_v1,
.unwrap = gss_krb5_unwrap_v1,
.signalg = SGN_ALG_HMAC_SHA1_DES3_KD,
.sealalg = SEAL_ALG_DES3KD,
.keybytes = 21,
.keylength = 24,
.cksumlength = 20,
.keyed_cksum = 1,
},
#endif
#if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1)
/*
* AES-128 with SHA-1 (RFC 3962)
*/
{
.etype = ENCTYPE_AES128_CTS_HMAC_SHA1_96,
.ctype = CKSUMTYPE_HMAC_SHA1_96_AES128,
.name = "aes128-cts",
.encrypt_name = "cts(cbc(aes))",
.aux_cipher = "cbc(aes)",
.cksum_name = "hmac(sha1)",
.import_ctx = gss_krb5_import_ctx_v2,
.derive_key = krb5_derive_key_v2,
.encrypt = gss_krb5_aes_encrypt,
.decrypt = gss_krb5_aes_decrypt,
.get_mic = gss_krb5_get_mic_v2,
.verify_mic = gss_krb5_verify_mic_v2,
.wrap = gss_krb5_wrap_v2,
.unwrap = gss_krb5_unwrap_v2,
.signalg = -1,
.sealalg = -1,
.keybytes = 16,
.keylength = BITS2OCTETS(128),
.Kc_length = BITS2OCTETS(128),
.Ke_length = BITS2OCTETS(128),
.Ki_length = BITS2OCTETS(128),
.cksumlength = BITS2OCTETS(96),
.keyed_cksum = 1,
},
/*
* AES-256 with SHA-1 (RFC 3962)
*/
{
.etype = ENCTYPE_AES256_CTS_HMAC_SHA1_96,
.ctype = CKSUMTYPE_HMAC_SHA1_96_AES256,
.name = "aes256-cts",
.encrypt_name = "cts(cbc(aes))",
.aux_cipher = "cbc(aes)",
.cksum_name = "hmac(sha1)",
.import_ctx = gss_krb5_import_ctx_v2,
.derive_key = krb5_derive_key_v2,
.encrypt = gss_krb5_aes_encrypt,
.decrypt = gss_krb5_aes_decrypt,
.get_mic = gss_krb5_get_mic_v2,
.verify_mic = gss_krb5_verify_mic_v2,
.wrap = gss_krb5_wrap_v2,
.unwrap = gss_krb5_unwrap_v2,
.signalg = -1,
.sealalg = -1,
.keybytes = 32,
.keylength = BITS2OCTETS(256),
.Kc_length = BITS2OCTETS(256),
.Ke_length = BITS2OCTETS(256),
.Ki_length = BITS2OCTETS(256),
.cksumlength = BITS2OCTETS(96),
.keyed_cksum = 1,
},
#endif
#if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA)
/*
* Camellia-128 with CMAC (RFC 6803)
*/
{
.etype = ENCTYPE_CAMELLIA128_CTS_CMAC,
.ctype = CKSUMTYPE_CMAC_CAMELLIA128,
.name = "camellia128-cts-cmac",
.encrypt_name = "cts(cbc(camellia))",
.aux_cipher = "cbc(camellia)",
.cksum_name = "cmac(camellia)",
.cksumlength = BITS2OCTETS(128),
.keyed_cksum = 1,
.keylength = BITS2OCTETS(128),
.Kc_length = BITS2OCTETS(128),
.Ke_length = BITS2OCTETS(128),
.Ki_length = BITS2OCTETS(128),
.import_ctx = gss_krb5_import_ctx_v2,
.derive_key = krb5_kdf_feedback_cmac,
.encrypt = gss_krb5_aes_encrypt,
.decrypt = gss_krb5_aes_decrypt,
.get_mic = gss_krb5_get_mic_v2,
.verify_mic = gss_krb5_verify_mic_v2,
.wrap = gss_krb5_wrap_v2,
.unwrap = gss_krb5_unwrap_v2,
},
/*
* Camellia-256 with CMAC (RFC 6803)
*/
{
.etype = ENCTYPE_CAMELLIA256_CTS_CMAC,
.ctype = CKSUMTYPE_CMAC_CAMELLIA256,
.name = "camellia256-cts-cmac",
.encrypt_name = "cts(cbc(camellia))",
.aux_cipher = "cbc(camellia)",
.cksum_name = "cmac(camellia)",
.cksumlength = BITS2OCTETS(128),
.keyed_cksum = 1,
.keylength = BITS2OCTETS(256),
.Kc_length = BITS2OCTETS(256),
.Ke_length = BITS2OCTETS(256),
.Ki_length = BITS2OCTETS(256),
.import_ctx = gss_krb5_import_ctx_v2,
.derive_key = krb5_kdf_feedback_cmac,
.encrypt = gss_krb5_aes_encrypt,
.decrypt = gss_krb5_aes_decrypt,
.get_mic = gss_krb5_get_mic_v2,
.verify_mic = gss_krb5_verify_mic_v2,
.wrap = gss_krb5_wrap_v2,
.unwrap = gss_krb5_unwrap_v2,
},
#endif
#if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2)
/*
* AES-128 with SHA-256 (RFC 8009)
*/
{
.etype = ENCTYPE_AES128_CTS_HMAC_SHA256_128,
.ctype = CKSUMTYPE_HMAC_SHA256_128_AES128,
.name = "aes128-cts-hmac-sha256-128",
.encrypt_name = "cts(cbc(aes))",
.aux_cipher = "cbc(aes)",
.cksum_name = "hmac(sha256)",
.cksumlength = BITS2OCTETS(128),
.keyed_cksum = 1,
.keylength = BITS2OCTETS(128),
.Kc_length = BITS2OCTETS(128),
.Ke_length = BITS2OCTETS(128),
.Ki_length = BITS2OCTETS(128),
.import_ctx = gss_krb5_import_ctx_v2,
.derive_key = krb5_kdf_hmac_sha2,
.encrypt = krb5_etm_encrypt,
.decrypt = krb5_etm_decrypt,
.get_mic = gss_krb5_get_mic_v2,
.verify_mic = gss_krb5_verify_mic_v2,
.wrap = gss_krb5_wrap_v2,
.unwrap = gss_krb5_unwrap_v2,
},
/*
* AES-256 with SHA-384 (RFC 8009)
*/
{
.etype = ENCTYPE_AES256_CTS_HMAC_SHA384_192,
.ctype = CKSUMTYPE_HMAC_SHA384_192_AES256,
.name = "aes256-cts-hmac-sha384-192",
.encrypt_name = "cts(cbc(aes))",
.aux_cipher = "cbc(aes)",
.cksum_name = "hmac(sha384)",
.cksumlength = BITS2OCTETS(192),
.keyed_cksum = 1,
.keylength = BITS2OCTETS(256),
.Kc_length = BITS2OCTETS(192),
.Ke_length = BITS2OCTETS(256),
.Ki_length = BITS2OCTETS(192),
.import_ctx = gss_krb5_import_ctx_v2,
.derive_key = krb5_kdf_hmac_sha2,
.encrypt = krb5_etm_encrypt,
.decrypt = krb5_etm_decrypt,
.get_mic = gss_krb5_get_mic_v2,
.verify_mic = gss_krb5_verify_mic_v2,
.wrap = gss_krb5_wrap_v2,
.unwrap = gss_krb5_unwrap_v2,
},
#endif
};
/*
* The list of advertised enctypes is specified in order of most
* preferred to least.
*/
static char gss_krb5_enctype_priority_list[64];
static void gss_krb5_prepare_enctype_priority_list(void)
{
static const u32 gss_krb5_enctypes[] = {
#if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA2)
ENCTYPE_AES256_CTS_HMAC_SHA384_192,
ENCTYPE_AES128_CTS_HMAC_SHA256_128,
#endif
#if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_CAMELLIA)
ENCTYPE_CAMELLIA256_CTS_CMAC,
ENCTYPE_CAMELLIA128_CTS_CMAC,
#endif
#if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_AES_SHA1)
ENCTYPE_AES256_CTS_HMAC_SHA1_96,
ENCTYPE_AES128_CTS_HMAC_SHA1_96,
#endif
#if defined(CONFIG_RPCSEC_GSS_KRB5_ENCTYPES_DES)
ENCTYPE_DES3_CBC_SHA1,
ENCTYPE_DES_CBC_MD5,
ENCTYPE_DES_CBC_CRC,
ENCTYPE_DES_CBC_MD4,
#endif
};
size_t total, i;
char buf[16];
char *sep;
int n;
sep = "";
gss_krb5_enctype_priority_list[0] = '\0';
for (total = 0, i = 0; i < ARRAY_SIZE(gss_krb5_enctypes); i++) {
n = sprintf(buf, "%s%u", sep, gss_krb5_enctypes[i]);
if (n < 0)
break;
if (total + n >= sizeof(gss_krb5_enctype_priority_list))
break;
strcat(gss_krb5_enctype_priority_list, buf);
sep = ",";
total += n;
}
}
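
As a sanity check, here is an illustrative Python model of the string this function builds; the numeric values are the standard IANA Kerberos enctype assignments for the types listed above:

# Illustrative model of gss_krb5_prepare_enctype_priority_list();
# the enctype numbers are the standard IANA Kerberos assignments.
ENCTYPES = [20, 19,          # AES-SHA2 (RFC 8009)
            26, 25,          # Camellia-CMAC (RFC 6803)
            18, 17,          # AES-SHA1 (RFC 3962)
            16, 3, 1, 2]     # DES3 and single-DES

def priority_list(enctypes, maxlen=64):
    out = ""
    for e in enctypes:
        piece = ("," if out else "") + str(e)
        if len(out) + len(piece) >= maxlen:   # mirrors the bounded strcat
            break
        out += piece
    return out

# With every enctype Kconfig option enabled:
#   priority_list(ENCTYPES) == "20,19,26,25,18,17,16,3,1,2"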
/**
* gss_krb5_lookup_enctype - Retrieve profile information for a given enctype
* @etype: ENCTYPE value
*
* Returns a pointer to a gss_krb5_enctype structure, or NULL if no
* matching etype is found.
*/
VISIBLE_IF_KUNIT
const struct gss_krb5_enctype *gss_krb5_lookup_enctype(u32 etype)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(supported_gss_krb5_enctypes); i++)
if (supported_gss_krb5_enctypes[i].etype == etype)
return &supported_gss_krb5_enctypes[i];
return NULL;
}
EXPORT_SYMBOL_IF_KUNIT(gss_krb5_lookup_enctype);
static struct crypto_sync_skcipher *
gss_krb5_alloc_cipher_v1(struct krb5_ctx *ctx, struct xdr_netobj *key)
{
struct crypto_sync_skcipher *tfm;
tfm = crypto_alloc_sync_skcipher(ctx->gk5e->encrypt_name, 0, 0);
if (IS_ERR(tfm))
return NULL;
if (crypto_sync_skcipher_setkey(tfm, key->data, key->len)) {
crypto_free_sync_skcipher(tfm);
return NULL;
}
return tfm;
}
static inline const void *
get_key(const void *p, const void *end,
struct krb5_ctx *ctx, struct crypto_sync_skcipher **res)
{
struct crypto_sync_skcipher *tfm;
struct xdr_netobj key;
int alg;
p = simple_get_bytes(p, end, &alg, sizeof(alg));
if (IS_ERR(p))
goto out_err;
switch (alg) {
case ENCTYPE_DES_CBC_CRC:
case ENCTYPE_DES_CBC_MD4:
@ -164,37 +363,26 @@ get_key(const void *p, const void *end,
alg = ENCTYPE_DES_CBC_RAW;
break;
}
if (!gss_krb5_lookup_enctype(alg)) {
pr_warn("gss_krb5: unsupported enctype: %d\n", alg);
goto out_err_inval;
}
p = simple_get_netobj(p, end, &key);
if (IS_ERR(p))
goto out_err;
tfm = gss_krb5_alloc_cipher_v1(ctx, &key);
kfree(key.data);
if (!tfm) {
pr_warn("gss_krb5: failed to initialize cipher '%s'\n",
ctx->gk5e->encrypt_name);
goto out_err_inval;
}
*res = tfm;
return p;
out_err_inval:
p = ERR_PTR(-EINVAL);
out_err:
return p;
@ -214,7 +402,7 @@ gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx)
/* Old format supports only DES! Any other enctype uses new format */
ctx->enctype = ENCTYPE_DES_CBC_RAW;
ctx->gk5e = gss_krb5_lookup_enctype(ctx->enctype);
if (ctx->gk5e == NULL) {
p = ERR_PTR(-EINVAL);
goto out_err;
@ -278,70 +466,34 @@ out_err:
return PTR_ERR(p);
}
#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED)
static int
gss_krb5_import_ctx_des(struct krb5_ctx *ctx, gfp_t gfp_mask)
{
return -EINVAL;
}
static int
gss_krb5_import_ctx_v1(struct krb5_ctx *ctx, gfp_t gfp_mask)
{
struct xdr_netobj keyin, keyout;
keyin.data = ctx->Ksess;
keyin.len = ctx->gk5e->keylength;
/* seq uses the raw key */
ctx->seq = gss_krb5_alloc_cipher_v1(ctx, &keyin);
if (ctx->seq == NULL)
goto out_err;
ctx->enc = gss_krb5_alloc_cipher_v1(ctx, &keyin);
if (ctx->enc == NULL)
goto out_free_seq;
/* derive cksum */
keyout.data = ctx->cksum;
keyout.len = ctx->gk5e->keylength;
if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_SIGN,
KEY_USAGE_SEED_CHECKSUM, gfp_mask))
goto out_free_enc;
return 0;
@ -352,118 +504,140 @@ out_free_seq:
out_err:
return -EINVAL;
}
#endif
#if defined(CONFIG_RPCSEC_GSS_KRB5_CRYPTOSYSTEM)
static struct crypto_sync_skcipher *
gss_krb5_alloc_cipher_v2(const char *cname, const struct xdr_netobj *key)
{
struct crypto_sync_skcipher *tfm;
tfm = crypto_alloc_sync_skcipher(cname, 0, 0);
if (IS_ERR(tfm))
return NULL;
if (crypto_sync_skcipher_setkey(tfm, key->data, key->len)) {
crypto_free_sync_skcipher(tfm);
return NULL;
}
return tfm;
}
static struct crypto_ahash *
gss_krb5_alloc_hash_v2(struct krb5_ctx *kctx, const struct xdr_netobj *key)
{
struct crypto_ahash *tfm;
tfm = crypto_alloc_ahash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC);
if (IS_ERR(tfm))
return NULL;
if (crypto_ahash_setkey(tfm, key->data, key->len)) {
crypto_free_ahash(tfm);
return NULL;
}
return tfm;
}
static int
gss_krb5_import_ctx_v2(struct krb5_ctx *ctx, gfp_t gfp_mask)
{
struct xdr_netobj keyin = {
.len = ctx->gk5e->keylength,
.data = ctx->Ksess,
};
struct xdr_netobj keyout;
int ret = -EINVAL;
keyout.data = kmalloc(GSS_KRB5_MAX_KEYLEN, gfp_mask);
if (!keyout.data)
return -ENOMEM;
/* initiator seal encryption */
keyout.len = ctx->gk5e->Ke_length;
if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_INITIATOR_SEAL,
KEY_USAGE_SEED_ENCRYPTION, gfp_mask))
goto out;
ctx->initiator_enc = gss_krb5_alloc_cipher_v2(ctx->gk5e->encrypt_name,
&keyout);
if (ctx->initiator_enc == NULL)
goto out;
if (ctx->gk5e->aux_cipher) {
ctx->initiator_enc_aux =
gss_krb5_alloc_cipher_v2(ctx->gk5e->aux_cipher,
&keyout);
if (ctx->initiator_enc_aux == NULL)
goto out_free;
}
/* acceptor seal encryption */
if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_ACCEPTOR_SEAL,
KEY_USAGE_SEED_ENCRYPTION, gfp_mask))
goto out_free;
ctx->acceptor_enc = gss_krb5_alloc_cipher_v2(ctx->gk5e->encrypt_name,
&keyout);
if (ctx->acceptor_enc == NULL)
goto out_free;
if (ctx->gk5e->aux_cipher) {
ctx->acceptor_enc_aux =
gss_krb5_alloc_cipher_v2(ctx->gk5e->aux_cipher,
&keyout);
if (ctx->acceptor_enc_aux == NULL)
goto out_free;
}
/* initiator sign checksum */
keyout.len = ctx->gk5e->Kc_length;
if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_INITIATOR_SIGN,
KEY_USAGE_SEED_CHECKSUM, gfp_mask))
goto out_free;
ctx->initiator_sign = gss_krb5_alloc_hash_v2(ctx, &keyout);
if (ctx->initiator_sign == NULL)
goto out_free;
/* acceptor sign checksum */
if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_ACCEPTOR_SIGN,
KEY_USAGE_SEED_CHECKSUM, gfp_mask))
goto out_free;
ctx->acceptor_sign = gss_krb5_alloc_hash_v2(ctx, &keyout);
if (ctx->acceptor_sign == NULL)
goto out_free;
/* initiator seal integrity */
keyout.len = ctx->gk5e->Ki_length;
if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_INITIATOR_SEAL,
KEY_USAGE_SEED_INTEGRITY, gfp_mask))
goto out_free;
ctx->initiator_integ = gss_krb5_alloc_hash_v2(ctx, &keyout);
if (ctx->initiator_integ == NULL)
goto out_free;
/* acceptor seal integrity */
if (krb5_derive_key(ctx, &keyin, &keyout, KG_USAGE_ACCEPTOR_SEAL,
KEY_USAGE_SEED_INTEGRITY, gfp_mask))
goto out_free;
ctx->acceptor_integ = gss_krb5_alloc_hash_v2(ctx, &keyout);
if (ctx->acceptor_integ == NULL)
goto out_free;
ret = 0;
out:
kfree_sensitive(keyout.data);
return ret;
out_free:
crypto_free_ahash(ctx->acceptor_integ);
crypto_free_ahash(ctx->initiator_integ);
crypto_free_ahash(ctx->acceptor_sign);
crypto_free_ahash(ctx->initiator_sign);
crypto_free_sync_skcipher(ctx->acceptor_enc_aux);
crypto_free_sync_skcipher(ctx->acceptor_enc);
crypto_free_sync_skcipher(ctx->initiator_enc_aux);
crypto_free_sync_skcipher(ctx->initiator_enc);
goto out;
}
#endif
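
For reference, each of the six context keys derived above feeds krb5_derive_key() a five-octet label: the big-endian RFC 4121 key-usage number followed by a seed octet from RFC 3961. The values below are the standard assignments these macros carry; the table is an illustrative summary, not kernel code:

# Illustrative summary of the RFC 4121 subkey derivation labels.
KG_USAGE_ACCEPTOR_SEAL  = 22
KG_USAGE_ACCEPTOR_SIGN  = 23
KG_USAGE_INITIATOR_SEAL = 24
KG_USAGE_INITIATOR_SIGN = 25
SEED_CHECKSUM, SEED_ENCRYPTION, SEED_INTEGRITY = 0x99, 0xAA, 0x55

def label(usage: int, seed: int) -> bytes:
    # 4-octet big-endian usage number, then the one-octet seed
    return usage.to_bytes(4, "big") + bytes([seed])

SUBKEYS = {
    "initiator_enc":   label(KG_USAGE_INITIATOR_SEAL, SEED_ENCRYPTION),
    "acceptor_enc":    label(KG_USAGE_ACCEPTOR_SEAL,  SEED_ENCRYPTION),
    "initiator_sign":  label(KG_USAGE_INITIATOR_SIGN, SEED_CHECKSUM),
    "acceptor_sign":   label(KG_USAGE_ACCEPTOR_SIGN,  SEED_CHECKSUM),
    "initiator_integ": label(KG_USAGE_INITIATOR_SEAL, SEED_INTEGRITY),
    "acceptor_integ":  label(KG_USAGE_ACCEPTOR_SEAL,  SEED_INTEGRITY),
}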
static int
gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
gfp_t gfp_mask)
@ -500,7 +674,7 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
/* Map ENCTYPE_DES3_CBC_SHA1 to ENCTYPE_DES3_CBC_RAW */
if (ctx->enctype == ENCTYPE_DES3_CBC_SHA1)
ctx->enctype = ENCTYPE_DES3_CBC_RAW;
ctx->gk5e = gss_krb5_lookup_enctype(ctx->enctype);
if (ctx->gk5e == NULL) {
dprintk("gss_kerberos_mech: unsupported krb5 enctype %u\n",
ctx->enctype);
@ -526,25 +700,15 @@ gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx,
}
ctx->mech_used.len = gss_kerberos_mech.gm_oid.len;
return ctx->gk5e->import_ctx(ctx, gfp_mask);
out_err:
return PTR_ERR(p);
}
static int
gss_krb5_import_sec_context(const void *p, size_t len, struct gss_ctx *ctx_id,
time64_t *endtime, gfp_t gfp_mask)
{
const void *end = (const void *)((const char *)p + len);
struct krb5_ctx *ctx;
@ -558,20 +722,21 @@ gss_import_sec_context_kerberos(const void *p, size_t len,
ret = gss_import_v1_context(p, end, ctx);
else
ret = gss_import_v2_context(p, end, ctx, gfp_mask);
memzero_explicit(&ctx->Ksess, sizeof(ctx->Ksess));
if (ret) {
kfree(ctx);
return ret;
}
dprintk("RPC: %s: returning %d\n", __func__, ret);
return ret;
ctx_id->internal_ctx_id = ctx;
if (endtime)
*endtime = ctx->endtime;
return 0;
}
static void
gss_krb5_delete_sec_context(void *internal_ctx)
{
struct krb5_ctx *kctx = internal_ctx;
crypto_free_sync_skcipher(kctx->seq);
@ -580,17 +745,105 @@ gss_delete_sec_context_kerberos(void *internal_ctx) {
crypto_free_sync_skcipher(kctx->initiator_enc);
crypto_free_sync_skcipher(kctx->acceptor_enc_aux);
crypto_free_sync_skcipher(kctx->initiator_enc_aux);
crypto_free_ahash(kctx->acceptor_sign);
crypto_free_ahash(kctx->initiator_sign);
crypto_free_ahash(kctx->acceptor_integ);
crypto_free_ahash(kctx->initiator_integ);
kfree(kctx->mech_used.data);
kfree(kctx);
}
/**
* gss_krb5_get_mic - get_mic for the Kerberos GSS mechanism
* @gctx: GSS context
* @text: plaintext to checksum
* @token: buffer into which to write the computed checksum
*
* Return values:
* %GSS_S_COMPLETE - success, and @token is filled in
* %GSS_S_FAILURE - checksum could not be generated
* %GSS_S_CONTEXT_EXPIRED - Kerberos context is no longer valid
*/
static u32 gss_krb5_get_mic(struct gss_ctx *gctx, struct xdr_buf *text,
struct xdr_netobj *token)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
return kctx->gk5e->get_mic(kctx, text, token);
}
/**
* gss_krb5_verify_mic - verify_mic for the Kerberos GSS mechanism
* @gctx: GSS context
* @message_buffer: plaintext to check
* @read_token: received checksum to check
*
* Return values:
* %GSS_S_COMPLETE - computed and received checksums match
* %GSS_S_DEFECTIVE_TOKEN - received checksum is not valid
* %GSS_S_BAD_SIG - computed and received checksums do not match
* %GSS_S_FAILURE - received checksum could not be checked
* %GSS_S_CONTEXT_EXPIRED - Kerberos context is no longer valid
*/
static u32 gss_krb5_verify_mic(struct gss_ctx *gctx,
struct xdr_buf *message_buffer,
struct xdr_netobj *read_token)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
return kctx->gk5e->verify_mic(kctx, message_buffer, read_token);
}
/**
* gss_krb5_wrap - gss_wrap for the Kerberos GSS mechanism
* @gctx: initialized GSS context
* @offset: byte offset in @buf to start writing the cipher text
* @buf: OUT: send buffer
* @pages: plaintext to wrap
*
* Return values:
* %GSS_S_COMPLETE - success, @buf has been updated
* %GSS_S_FAILURE - @buf could not be wrapped
* %GSS_S_CONTEXT_EXPIRED - Kerberos context is no longer valid
*/
static u32 gss_krb5_wrap(struct gss_ctx *gctx, int offset,
struct xdr_buf *buf, struct page **pages)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
return kctx->gk5e->wrap(kctx, offset, buf, pages);
}
/**
* gss_krb5_unwrap - gss_unwrap for the Kerberos GSS mechanism
* @gctx: initialized GSS context
* @offset: starting byte offset into @buf
* @len: size of ciphertext to unwrap
* @buf: ciphertext to unwrap
*
* Return values:
* %GSS_S_COMPLETE - success, @buf has been updated
* %GSS_S_DEFECTIVE_TOKEN - received blob is not valid
* %GSS_S_BAD_SIG - computed and received checksums do not match
* %GSS_S_FAILURE - @buf could not be unwrapped
* %GSS_S_CONTEXT_EXPIRED - Kerberos context is no longer valid
*/
static u32 gss_krb5_unwrap(struct gss_ctx *gctx, int offset,
int len, struct xdr_buf *buf)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
return kctx->gk5e->unwrap(kctx, offset, len, buf,
&gctx->slack, &gctx->align);
}
static const struct gss_api_ops gss_kerberos_ops = {
.gss_import_sec_context = gss_krb5_import_sec_context,
.gss_get_mic = gss_krb5_get_mic,
.gss_verify_mic = gss_krb5_verify_mic,
.gss_wrap = gss_krb5_wrap,
.gss_unwrap = gss_krb5_unwrap,
.gss_delete_sec_context = gss_krb5_delete_sec_context,
};
static struct pf_desc gss_kerberos_pfs[] = {
@ -631,13 +884,14 @@ static struct gss_api_mech gss_kerberos_mech = {
.gm_ops = &gss_kerberos_ops,
.gm_pf_num = ARRAY_SIZE(gss_kerberos_pfs),
.gm_pfs = gss_kerberos_pfs,
.gm_upcall_enctypes = gss_krb5_enctype_priority_list,
};
static int __init init_kerberos_module(void)
{
int status;
gss_krb5_prepare_enctype_priority_list();
status = gss_mech_register(&gss_kerberos_mech);
if (status)
printk("Failed to register kerberos gss mechanism!\n");


@ -65,10 +65,14 @@
#include <linux/crypto.h>
#include <linux/atomic.h>
#include "gss_krb5_internal.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED)
static void *
setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token)
{
@ -95,37 +99,9 @@ setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token)
return krb5_hdr;
}
u32
gss_krb5_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
struct xdr_netobj *token)
{
char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
@ -162,18 +138,50 @@ gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text,
return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
#endif

static void *
setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token)
{
u16 *ptr;
void *krb5_hdr;
u8 *p, flags = 0x00;
if ((ctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
flags |= 0x01;
if (ctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY)
flags |= 0x04;
/* Per rfc 4121, sec 4.2.6.1, there is no header,
* just start the token.
*/
krb5_hdr = (u16 *)token->data;
ptr = krb5_hdr;
*ptr++ = KG2_TOK_MIC;
p = (u8 *)ptr;
*p++ = flags;
*p++ = 0xff;
ptr = (u16 *)p;
*ptr++ = 0xffff;
*ptr = 0xffff;
token->len = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength;
return krb5_hdr;
}
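
The header this helper writes is the fixed 16-byte RFC 4121 MIC token prologue: the token ID 0x0404, one flags octet, five 0xff filler octets, then the 64-bit sequence number that gss_krb5_get_mic_v2() fills in at offset 8. An illustrative standalone encoding, not kernel code:

# Illustrative RFC 4121 MIC token header; not kernel code.
import struct

def mic_token_header(initiator: bool, acceptor_subkey: bool,
                     seq: int) -> bytes:
    flags = 0
    if not initiator:
        flags |= 0x01                   # SentByAcceptor
    if acceptor_subkey:
        flags |= 0x04                   # AcceptorSubkey
    return (b"\x04\x04" +               # TOK_ID for a MIC token
            bytes([flags]) +
            b"\xff" * 5 +               # filler
            struct.pack(">Q", seq))     # 64-bit SND_SEQ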
u32
gss_krb5_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
struct xdr_netobj *token)
{
struct crypto_ahash *tfm = ctx->initiate ?
ctx->initiator_sign : ctx->acceptor_sign;
struct xdr_netobj cksumobj = {
.len = ctx->gk5e->cksumlength,
};
__be64 seq_send_be64;
void *krb5_hdr;
time64_t now;
dprintk("RPC: %s\n", __func__);
@ -184,39 +192,11 @@ gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text,
seq_send_be64 = cpu_to_be64(atomic64_fetch_inc(&ctx->seq_send64));
memcpy(krb5_hdr + 8, (char *) &seq_send_be64, 8);
cksumobj.data = krb5_hdr + GSS_KRB5_TOK_HDR_LEN;
if (gss_krb5_checksum(tfm, krb5_hdr, GSS_KRB5_TOK_HDR_LEN,
text, 0, &cksumobj))
return GSS_S_FAILURE;
now = ktime_get_real_seconds();
return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
u32
gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
struct xdr_netobj *token)
{
struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
switch (ctx->enctype) {
default:
BUG();
case ENCTYPE_DES_CBC_RAW:
case ENCTYPE_DES3_CBC_RAW:
return gss_get_mic_v1(ctx, text, token);
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
return gss_get_mic_v2(ctx, text, token);
}
}


@ -35,6 +35,8 @@
#include <linux/types.h>
#include <linux/sunrpc/gss_krb5.h>
#include "gss_krb5_internal.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif

(File diff suppressed because it is too large.)


@ -57,22 +57,25 @@
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <crypto/algapi.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/crypto.h>
#include "gss_krb5_internal.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED)
/* read_token is a mic token, and message_buffer is the data that the mic was
* supposedly taken over. */
u32
gss_krb5_verify_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *message_buffer,
struct xdr_netobj *read_token)
{
int signalg;
int sealalg;
@ -141,21 +144,24 @@ gss_verify_mic_v1(struct krb5_ctx *ctx,
return GSS_S_COMPLETE;
}
#endif
u32
gss_krb5_verify_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *message_buffer,
struct xdr_netobj *read_token)
{
struct crypto_ahash *tfm = ctx->initiate ?
ctx->acceptor_sign : ctx->initiator_sign;
char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
struct xdr_netobj cksumobj = {
.len = ctx->gk5e->cksumlength,
.data = cksumdata,
};
u8 *ptr = read_token->data;
__be16 be16_ptr;
time64_t now;
u8 flags;
int i;
dprintk("RPC: %s\n", __func__);
@ -177,16 +183,8 @@ gss_verify_mic_v2(struct krb5_ctx *ctx,
if (ptr[i] != 0xff)
return GSS_S_DEFECTIVE_TOKEN;
if (gss_krb5_checksum(tfm, ptr, GSS_KRB5_TOK_HDR_LEN,
message_buffer, 0, &cksumobj))
return GSS_S_FAILURE;
if (memcmp(cksumobj.data, ptr + GSS_KRB5_TOK_HDR_LEN,
@ -205,22 +203,3 @@ gss_verify_mic_v2(struct krb5_ctx *ctx,
return GSS_S_COMPLETE;
}
u32
gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
struct xdr_buf *message_buffer,
struct xdr_netobj *read_token)
{
struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
switch (ctx->enctype) {
default:
BUG();
case ENCTYPE_DES_CBC_RAW:
case ENCTYPE_DES3_CBC_RAW:
return gss_verify_mic_v1(ctx, message_buffer, read_token);
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
return gss_verify_mic_v2(ctx, message_buffer, read_token);
}
}

View File

@ -32,13 +32,16 @@
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include "gss_krb5_internal.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
#if defined(CONFIG_RPCSEC_GSS_KRB5_SIMPLIFIED)
static inline int
gss_krb5_padding(int blocksize, int length)
{
@ -113,39 +116,6 @@ out:
return 0;
}
void
gss_krb5_make_confounder(char *p, u32 conflen)
{
static u64 i = 0;
u64 *q = (u64 *)p;
/* rfc1964 claims this should be "random". But all that's really
* necessary is that it be unique. And not even that is necessary in
* our case since our "gssapi" implementation exists only to support
* rpcsec_gss, so we know that the only buffers we will ever encrypt
* already begin with a unique sequence number. Just to hedge my bets
* I'll make a half-hearted attempt at something unique, but ensuring
* uniqueness would mean worrying about atomicity and rollover, and I
* don't care enough. */
/* initialize to random value */
if (i == 0) {
i = get_random_u32();
i = (i << 32) | get_random_u32();
}
switch (conflen) {
case 16:
*q++ = i++;
fallthrough;
case 8:
*q++ = i++;
break;
default:
BUG();
}
}
/* Assumptions: the head and tail of inbuf are ours to play with.
* The pages, however, may be real pages in the page cache and we replace
* them with scratch pages from **pages before writing to them. */
@ -154,9 +124,9 @@ gss_krb5_make_confounder(char *p, u32 conflen)
/* XXX factor out common code with seal/unseal. */
u32
gss_krb5_wrap_v1(struct krb5_ctx *kctx, int offset,
struct xdr_buf *buf, struct page **pages)
{
char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
@ -168,7 +138,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
struct page **tmp_pages;
u32 seq_send;
u8 *cksumkey;
u32 conflen = crypto_sync_skcipher_blocksize(kctx->enc);
dprintk("RPC: %s\n", __func__);
@ -211,7 +181,7 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
ptr[6] = 0xff;
ptr[7] = 0xff;
krb5_make_confounder(msg_start, conflen);
if (kctx->gk5e->keyed_cksum)
cksumkey = kctx->cksum;
@ -243,10 +213,10 @@ gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
u32
gss_krb5_unwrap_v1(struct krb5_ctx *kctx, int offset, int len,
struct xdr_buf *buf, unsigned int *slack,
unsigned int *align)
{
int signalg;
int sealalg;
@ -261,7 +231,7 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
void *data_start, *orig_start;
int data_len;
int blocksize;
u32 conflen = crypto_sync_skcipher_blocksize(kctx->enc);
int crypt_offset;
u8 *cksumkey;
unsigned int saved_len = buf->len;
@ -355,6 +325,8 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, int len,
return GSS_S_COMPLETE;
}
#endif
/*
* We can shift data by up to LOCAL_BUF_LEN bytes in a pass. If we need
* to do more than that, we shift repeatedly. Kevin Coffman reports
@ -405,9 +377,9 @@ static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
_rotate_left(&subbuf, shift);
}
u32
gss_krb5_wrap_v2(struct krb5_ctx *kctx, int offset,
struct xdr_buf *buf, struct page **pages)
{
u8 *ptr;
time64_t now;
@ -418,9 +390,6 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
dprintk("RPC: %s\n", __func__);
/* make room for gss token header */
if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
return GSS_S_FAILURE;
@ -448,7 +417,7 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
be64ptr = (__be64 *)be16ptr;
*be64ptr = cpu_to_be64(atomic64_fetch_inc(&kctx->seq_send64));
err = (*kctx->gk5e->encrypt)(kctx, offset, buf, pages);
if (err)
return err;
@ -456,10 +425,10 @@ gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}
u32
gss_krb5_unwrap_v2(struct krb5_ctx *kctx, int offset, int len,
struct xdr_buf *buf, unsigned int *slack,
unsigned int *align)
{
time64_t now;
u8 *ptr;
@ -473,9 +442,6 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
dprintk("RPC: %s\n", __func__);
ptr = buf->head[0].iov_base + offset;
if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
@ -505,8 +471,8 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
if (rrc != 0)
rotate_left(offset + 16, buf, rrc);
err = (*kctx->gk5e->decrypt)(kctx, offset, len, buf,
&headskip, &tailskip);
if (err)
return GSS_S_FAILURE;
@ -556,41 +522,3 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, int len,
*slack = *align + XDR_QUADLEN(ec + GSS_KRB5_TOK_HDR_LEN + tailskip);
return GSS_S_COMPLETE;
}
u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
struct xdr_buf *buf, struct page **pages)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
switch (kctx->enctype) {
default:
BUG();
case ENCTYPE_DES_CBC_RAW:
case ENCTYPE_DES3_CBC_RAW:
return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
}
}
u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset,
int len, struct xdr_buf *buf)
{
struct krb5_ctx *kctx = gctx->internal_ctx_id;
switch (kctx->enctype) {
default:
BUG();
case ENCTYPE_DES_CBC_RAW:
case ENCTYPE_DES3_CBC_RAW:
return gss_unwrap_kerberos_v1(kctx, offset, len, buf,
&gctx->slack, &gctx->align);
case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
return gss_unwrap_kerberos_v2(kctx, offset, len, buf,
&gctx->slack, &gctx->align);
}
}

(File diff suppressed because it is too large.)


@ -33,6 +33,7 @@ struct sunrpc_net {
int pipe_version;
atomic_t pipe_users;
struct proc_dir_entry *use_gssp_proc;
struct proc_dir_entry *gss_krb5_enctypes;
};
extern unsigned int sunrpc_net_id;


@ -83,7 +83,8 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp)
{
const struct svc_program *prog = statp->program;
const struct svc_version *vers;
unsigned int i, j, k;
unsigned long count;
seq_printf(seq,
"net %u %u %u %u\n",
@ -104,8 +105,12 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp)
if (!vers)
continue;
seq_printf(seq, "proc%d %u", i, vers->vs_nproc);
for (j = 0; j < vers->vs_nproc; j++) {
count = 0;
for_each_possible_cpu(k)
count += per_cpu(vers->vs_count[j], k);
seq_printf(seq, " %lu", count);
}
seq_putc(seq, '\n');
}
}


@ -512,6 +512,10 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
INIT_LIST_HEAD(&pool->sp_sockets);
INIT_LIST_HEAD(&pool->sp_all_threads);
spin_lock_init(&pool->sp_lock);
percpu_counter_init(&pool->sp_sockets_queued, 0, GFP_KERNEL);
percpu_counter_init(&pool->sp_threads_woken, 0, GFP_KERNEL);
percpu_counter_init(&pool->sp_threads_timedout, 0, GFP_KERNEL);
}
return serv;
@ -565,6 +569,7 @@ void
svc_destroy(struct kref *ref)
{
struct svc_serv *serv = container_of(ref, struct svc_serv, sv_refcnt);
unsigned int i;
dprintk("svc: svc_destroy(%s)\n", serv->sv_program->pg_name);
timer_shutdown_sync(&serv->sv_temptimer);
@ -580,6 +585,13 @@ svc_destroy(struct kref *ref)
svc_pool_map_put(serv->sv_nrpools);
for (i = 0; i < serv->sv_nrpools; i++) {
struct svc_pool *pool = &serv->sv_pools[i];
percpu_counter_destroy(&pool->sp_sockets_queued);
percpu_counter_destroy(&pool->sp_threads_woken);
percpu_counter_destroy(&pool->sp_threads_timedout);
}
kfree(serv->sv_pools);
kfree(serv);
}
@ -1208,7 +1220,7 @@ svc_generic_init_request(struct svc_rqst *rqstp,
memset(rqstp->rq_resp, 0, procp->pc_ressize);
/* Bump per-procedure stats counter */
this_cpu_inc(versp->vs_count[rqstp->rq_proc]);
ret->dispatch = versp->vs_dispatch;
return rpc_success;
@ -1225,22 +1237,16 @@ EXPORT_SYMBOL_GPL(svc_generic_init_request);
* Common routine for processing the RPC request.
*/
static int
svc_process_common(struct svc_rqst *rqstp)
{
struct xdr_stream *xdr = &rqstp->rq_res_stream;
struct svc_program *progp;
const struct svc_procedure *procp = NULL;
struct svc_serv *serv = rqstp->rq_server;
struct svc_process_info process;
unsigned int aoffset;
int auth_res, rc;
__be32 *p;
/* Will be turned off by GSS integrity and privacy services */
set_bit(RQ_SPLICE_OK, &rqstp->rq_flags);
@ -1248,27 +1254,25 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
set_bit(RQ_USEDEFERRAL, &rqstp->rq_flags);
clear_bit(RQ_DROPME, &rqstp->rq_flags);
/* Construct the first words of the reply: */
svcxdr_init_encode(rqstp);
xdr_stream_encode_be32(xdr, rqstp->rq_xid);
xdr_stream_encode_be32(xdr, rpc_reply);

p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 4);
if (unlikely(!p))
goto err_short_len;
if (*p++ != cpu_to_be32(RPC_VERSION))
goto err_bad_rpc;

xdr_stream_encode_be32(xdr, rpc_msg_accepted);

rqstp->rq_prog = be32_to_cpup(p++);
rqstp->rq_vers = be32_to_cpup(p++);
rqstp->rq_proc = be32_to_cpup(p);
for (progp = serv->sv_program; progp; progp = progp->pg_next)
if (rqstp->rq_prog == progp->pg_prog)
break;
/*
@ -1285,10 +1289,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
case SVC_OK:
break;
case SVC_GARBAGE:
goto err_garbage;
goto err_garbage_args;
case SVC_SYSERR:
goto err_system_err;
case SVC_DENIED:
goto err_bad_auth;
case SVC_CLOSE:
@ -1302,8 +1305,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
if (progp == NULL)
goto err_bad_prog;
switch (progp->pg_init_request(rqstp, progp, &process)) {
case rpc_success:
break;
case rpc_prog_unavail:
@ -1323,9 +1325,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
serv->sv_stats->rpccnt++;
trace_svc_process(rqstp, progp->pg_name);
aoffset = xdr_stream_pos(xdr);
/* un-reserve some of the out-queue now that we have a
* better idea of reply size
@ -1334,7 +1334,7 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
svc_reserve_auth(rqstp, procp->pc_xdrressize<<2);
/* Call the function that processes the request. */
rc = process.dispatch(rqstp);
if (procp->pc_release)
procp->pc_release(rqstp);
if (!rc)
@ -1342,9 +1342,8 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
if (rqstp->rq_auth_stat != rpc_auth_ok)
goto err_bad_auth;
/* Check RPC status result */
if (*rqstp->rq_accept_statp != rpc_success)
xdr_truncate_encode(xdr, aoffset);
if (procp->pc_encode == NULL)
goto dropit;
@ -1368,33 +1367,34 @@ close_xprt:
return 0;
err_short_len:
svc_printk(rqstp, "short len %zd, dropping request\n",
argv->iov_len);
svc_printk(rqstp, "short len %u, dropping request\n",
rqstp->rq_arg.len);
goto close_xprt;
err_bad_rpc:
serv->sv_stats->rpcbadfmt++;
xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
xdr_stream_encode_u32(xdr, RPC_MISMATCH);
/* Only RPCv2 supported */
xdr_stream_encode_u32(xdr, RPC_VERSION);
xdr_stream_encode_u32(xdr, RPC_VERSION);
goto sendit;
err_bad_auth:
dprintk("svc: authentication failed (%d)\n",
be32_to_cpu(rqstp->rq_auth_stat));
serv->sv_stats->rpcbadauth++;
/* Restore write pointer to location of reply status: */
xdr_truncate_encode(xdr, XDR_UNIT * 2);
xdr_stream_encode_u32(xdr, RPC_MSG_DENIED);
xdr_stream_encode_u32(xdr, RPC_AUTH_ERROR);
xdr_stream_encode_be32(xdr, rqstp->rq_auth_stat);
goto sendit;
err_bad_prog:
dprintk("svc: unknown program %d\n", prog);
dprintk("svc: unknown program %d\n", rqstp->rq_prog);
serv->sv_stats->rpcbadfmt++;
xdr_stream_encode_u32(xdr, RPC_PROG_UNAVAIL);
goto sendit;
err_bad_vers:
@ -1402,25 +1402,28 @@ err_bad_vers:
rqstp->rq_vers, rqstp->rq_prog, progp->pg_name);
serv->sv_stats->rpcbadfmt++;
xdr_stream_encode_u32(xdr, RPC_PROG_MISMATCH);
xdr_stream_encode_u32(xdr, process.mismatch.lovers);
xdr_stream_encode_u32(xdr, process.mismatch.hivers);
goto sendit;
err_bad_proc:
svc_printk(rqstp, "unknown procedure (%d)\n", rqstp->rq_proc);
serv->sv_stats->rpcbadfmt++;
xdr_stream_encode_u32(xdr, RPC_PROC_UNAVAIL);
goto sendit;
err_garbage_args:
svc_printk(rqstp, "failed to decode RPC header\n");
serv->sv_stats->rpcbadfmt++;
xdr_stream_encode_u32(xdr, RPC_GARBAGE_ARGS);
goto sendit;
err_system_err:
serv->sv_stats->rpcbadfmt++;
xdr_stream_encode_u32(xdr, RPC_SYSTEM_ERR);
goto sendit;
}
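
The xdr_stream calls above assemble the standard RFC 5531 reply header: XID, REPLY, MSG_ACCEPTED, the verifier, and finally the accept status that xdr_truncate_encode() can rewind to on error. An illustrative byte-level model, not kernel code:

# Illustrative RFC 5531 accepted-reply header; not kernel code.
import struct

RPC_REPLY, MSG_ACCEPTED, RPC_SUCCESS = 1, 0, 0

def accepted_reply_header(xid: int, verf_flavor: int = 0,
                          verf_body: bytes = b"") -> bytes:
    hdr = struct.pack(">III", xid, RPC_REPLY, MSG_ACCEPTED)
    hdr += struct.pack(">II", verf_flavor, len(verf_body)) + verf_body
    return hdr + struct.pack(">I", RPC_SUCCESS)   # accept_stat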
@ -1430,9 +1433,8 @@ err_bad:
int
svc_process(struct svc_rqst *rqstp)
{
__be32 *p;
#if IS_ENABLED(CONFIG_FAIL_SUNRPC)
if (!fail_sunrpc.ignore_server_disconnect &&
@ -1455,16 +1457,21 @@ svc_process(struct svc_rqst *rqstp)
rqstp->rq_res.tail[0].iov_base = NULL;
rqstp->rq_res.tail[0].iov_len = 0;
svcxdr_init_decode(rqstp);
p = xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2);
if (unlikely(!p))
goto out_drop;
rqstp->rq_xid = *p++;
if (unlikely(*p != rpc_call))
goto out_baddir;
if (!svc_process_common(rqstp))
goto out_drop;
return svc_send(rqstp);
out_baddir:
svc_printk(rqstp, "bad direction 0x%08x, dropping request\n",
be32_to_cpu(*p));
rqstp->rq_server->sv_stats->rpcbadfmt++;
out_drop:
svc_drop(rqstp);
@ -1481,8 +1488,6 @@ int
bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
struct svc_rqst *rqstp)
{
struct kvec *argv = &rqstp->rq_arg.head[0];
struct kvec *resv = &rqstp->rq_res.head[0];
struct rpc_task *task;
int proc_error;
int error;
@ -1513,18 +1518,21 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
rqstp->rq_arg.len = rqstp->rq_arg.head[0].iov_len +
rqstp->rq_arg.page_len;
/* Reset the response buffer */
rqstp->rq_res.head[0].iov_len = 0;
/*
* Skip the XID and calldir fields because they've already
* been processed by the caller.
*/
svcxdr_init_decode(rqstp);
if (!xdr_inline_decode(&rqstp->rq_arg_stream, XDR_UNIT * 2)) {
error = -EINVAL;
goto out;
}
/* Parse and execute the bc call */
proc_error = svc_process_common(rqstp);
atomic_dec(&req->rq_xprt->bc_slot_count);
if (!proc_error) {


@ -462,11 +462,9 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
pool = svc_pool_for_cpu(xprt->xpt_server);
percpu_counter_inc(&pool->sp_sockets_queued);
spin_lock_bh(&pool->sp_lock);
list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
spin_unlock_bh(&pool->sp_lock);
/* find a thread for this xprt */
@ -474,7 +472,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt)
list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
continue;
percpu_counter_inc(&pool->sp_threads_woken);
rqstp->rq_qtime = ktime_get();
wake_up_process(rqstp->rq_task);
goto out_unlock;
@ -769,7 +767,7 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
goto out_found;
if (!time_left)
percpu_counter_inc(&pool->sp_threads_timedout);
if (signalled() || kthread_should_stop())
return ERR_PTR(-EINTR);
@ -888,9 +886,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
clear_bit(XPT_OLD, &xprt->xpt_flags);
xprt->xpt_ops->xpo_secure_port(rqstp);
rqstp->rq_chandle.defer = svc_defer;
rqstp->rq_xid = svc_getu32(&rqstp->rq_arg.head[0]);
if (serv->sv_stats)
serv->sv_stats->netcnt++;
@ -1441,12 +1437,12 @@ static int svc_pool_stats_show(struct seq_file *m, void *p)
return 0;
}
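/*
 * Note: sp_sockets_queued is reported twice below. The old "packets"
 * counter was incremented at the same site, so its column now repeats
 * the sockets_queued sum to keep this file's layout stable.
 */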
seq_printf(m, "%u %lu %lu %lu %lu\n",
seq_printf(m, "%u %llu %llu %llu %llu\n",
pool->sp_id,
percpu_counter_sum_positive(&pool->sp_sockets_queued),
percpu_counter_sum_positive(&pool->sp_sockets_queued),
percpu_counter_sum_positive(&pool->sp_threads_woken),
percpu_counter_sum_positive(&pool->sp_threads_timedout));
return 0;
}


@ -63,14 +63,17 @@ svc_put_auth_ops(struct auth_ops *aops)
int
svc_authenticate(struct svc_rqst *rqstp)
{
struct auth_ops *aops;
u32 flavor;
rqstp->rq_auth_stat = rpc_auth_ok;
/*
* Decode the Call credential's flavor field. The credential's
* body field is decoded in the chosen ->accept method below.
*/
if (xdr_stream_decode_u32(&rqstp->rq_arg_stream, &flavor) < 0)
return SVC_GARBAGE;
aops = svc_get_auth_ops(flavor);
if (aops == NULL) {


@ -729,23 +729,38 @@ out:
EXPORT_SYMBOL_GPL(svcauth_unix_set_client);
/**
* svcauth_null_accept - Decode and validate incoming RPC_AUTH_NULL credential
* @rqstp: RPC transaction
*
* Return values:
* %SVC_OK: Both credential and verifier are valid
* %SVC_DENIED: Credential or verifier is not valid
* %SVC_GARBAGE: Failed to decode credential or verifier
* %SVC_CLOSE: Temporary failure
*
* rqstp->rq_auth_stat is set as mandated by RFC 5531.
*/
static int
svcauth_null_accept(struct svc_rqst *rqstp)
{
struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct svc_cred *cred = &rqstp->rq_cred;
u32 flavor, len;
void *body;
/* Length of Call's credential body field: */
if (xdr_stream_decode_u32(xdr, &len) < 0)
return SVC_GARBAGE;
if (len != 0) {
rqstp->rq_auth_stat = rpc_autherr_badcred;
return SVC_DENIED;
}
/* Call's verf field: */
if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
return SVC_GARBAGE;
if (flavor != RPC_AUTH_NULL || len != 0) {
rqstp->rq_auth_stat = rpc_autherr_badverf;
return SVC_DENIED;
}
@ -757,9 +772,11 @@ svcauth_null_accept(struct svc_rqst *rqstp)
if (cred->cr_group_info == NULL)
return SVC_CLOSE; /* kmalloc failure - client must retry */
if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
RPC_AUTH_NULL, NULL, 0) < 0)
return SVC_CLOSE;
if (!svcxdr_set_accept_stat(rqstp))
return SVC_CLOSE;
rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL;
return SVC_OK;
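
Both the credential and the verifier checked above are RFC 5531 opaque_auth structures: a 32-bit flavor, a 32-bit body length, then up to 400 octets of body. An illustrative encoder, not kernel code (an AUTH_NULL credential is flavor 0 with an empty body):

# Illustrative XDR opaque_auth encoder (RFC 5531); not kernel code.
import struct

def opaque_auth(flavor: int, body: bytes = b"") -> bytes:
    if len(body) > 400:                 # RFC 5531 limit on the body
        raise ValueError("auth body too long")
    pad = (4 - len(body) % 4) % 4       # XDR pads opaques to 4 octets
    return struct.pack(">II", flavor, len(body)) + body + b"\x00" * pad

AUTH_NULL_CRED = opaque_auth(0)         # eight zero bytes on the wire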
@ -783,31 +800,45 @@ struct auth_ops svcauth_null = {
.name = "null",
.owner = THIS_MODULE,
.flavour = RPC_AUTH_NULL,
.accept = svcauth_null_accept,
.release = svcauth_null_release,
.set_client = svcauth_unix_set_client,
};
/**
* svcauth_tls_accept - Decode and validate incoming RPC_AUTH_TLS credential
* @rqstp: RPC transaction
*
* Return values:
* %SVC_OK: Both credential and verifier are valid
* %SVC_DENIED: Credential or verifier is not valid
* %SVC_GARBAGE: Failed to decode credential or verifier
* %SVC_CLOSE: Temporary failure
*
* rqstp->rq_auth_stat is set as mandated by RFC 5531.
*/
static int
svcauth_tls_accept(struct svc_rqst *rqstp)
{
struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct svc_cred *cred = &rqstp->rq_cred;
u32 flavor, len;
void *body;
__be32 *p;
/* Length of Call's credential body field: */
if (xdr_stream_decode_u32(xdr, &len) < 0)
return SVC_GARBAGE;
if (len != 0) {
rqstp->rq_auth_stat = rpc_autherr_badcred;
return SVC_DENIED;
}
/* Call's verf field: */
if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
return SVC_GARBAGE;
if (flavor != RPC_AUTH_NULL || len != 0) {
rqstp->rq_auth_stat = rpc_autherr_badverf;
return SVC_DENIED;
}
@@ -818,21 +849,27 @@ svcauth_tls_accept(struct svc_rqst *rqstp)
 		return SVC_DENIED;
 	}
 
-	/* Mapping to nobody uid/gid is required */
+	/* Signal that mapping to nobody uid/gid is required */
 	cred->cr_uid = INVALID_UID;
 	cred->cr_gid = INVALID_GID;
 	cred->cr_group_info = groups_alloc(0);
 	if (cred->cr_group_info == NULL)
-		return SVC_CLOSE; /* kmalloc failure - client must retry */
+		return SVC_CLOSE;
 
 	/* Reply's verifier */
-	svc_putnl(resv, RPC_AUTH_NULL);
 	if (rqstp->rq_xprt->xpt_ops->xpo_start_tls) {
-		svc_putnl(resv, 8);
-		memcpy(resv->iov_base + resv->iov_len, "STARTTLS", 8);
-		resv->iov_len += 8;
-	} else
-		svc_putnl(resv, 0);
+		p = xdr_reserve_space(&rqstp->rq_res_stream, XDR_UNIT * 2 + 8);
+		if (!p)
+			return SVC_CLOSE;
+		*p++ = rpc_auth_null;
+		*p++ = cpu_to_be32(8);
+		memcpy(p, "STARTTLS", 8);
+	} else {
+		if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
+						  RPC_AUTH_NULL, NULL, 0) < 0)
+			return SVC_CLOSE;
+	}
+	if (!svcxdr_set_accept_stat(rqstp))
+		return SVC_CLOSE;
 
 	rqstp->rq_cred.cr_flavor = RPC_AUTH_TLS;
 	return SVC_OK;
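
A sketch of the client side of this exchange, with a hypothetical helper name: an AUTH_TLS probe succeeds only when the reply verifier is AUTH_NULL carrying exactly the 8-octet "STARTTLS" body encoded above.

	/* Sketch (not kernel code): test the reply verifier from an
	 * RPC_AUTH_TLS probe. A body of "STARTTLS" means the server is
	 * willing to negotiate TLS on this connection.
	 */
	static bool example_server_offers_starttls(u32 flavor,
						   const void *body,
						   unsigned int len)
	{
		return flavor == RPC_AUTH_NULL && len == 8 &&
		       memcmp(body, "STARTTLS", 8) == 0;
	}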
@@ -842,32 +879,48 @@ struct auth_ops svcauth_tls = {
 	.name		= "tls",
 	.owner		= THIS_MODULE,
 	.flavour	= RPC_AUTH_TLS,
-	.accept 	= svcauth_tls_accept,
+	.accept		= svcauth_tls_accept,
 	.release	= svcauth_null_release,
 	.set_client	= svcauth_unix_set_client,
 };
 
+/**
+ * svcauth_unix_accept - Decode and validate incoming RPC_AUTH_SYS credential
+ * @rqstp: RPC transaction
+ *
+ * Return values:
+ *   %SVC_OK: Both credential and verifier are valid
+ *   %SVC_DENIED: Credential or verifier is not valid
+ *   %SVC_GARBAGE: Failed to decode credential or verifier
+ *   %SVC_CLOSE: Temporary failure
+ *
+ * rqstp->rq_auth_stat is set as mandated by RFC 5531.
+ */
 static int
 svcauth_unix_accept(struct svc_rqst *rqstp)
 {
-	struct kvec	*argv = &rqstp->rq_arg.head[0];
-	struct kvec	*resv = &rqstp->rq_res.head[0];
+	struct xdr_stream *xdr = &rqstp->rq_arg_stream;
 	struct svc_cred	*cred = &rqstp->rq_cred;
 	struct user_namespace *userns;
-	u32		slen, i;
-	int		len   = argv->iov_len;
+	u32 flavor, len, i;
+	void *body;
+	__be32 *p;
 
-	if ((len -= 3*4) < 0)
+	/*
+	 * This implementation ignores the length of the Call's
+	 * credential body field and the timestamp and machinename
+	 * fields.
+	 */
+	p = xdr_inline_decode(xdr, XDR_UNIT * 3);
+	if (!p)
 		return SVC_GARBAGE;
+	len = be32_to_cpup(p + 2);
+	if (len > RPC_MAX_MACHINENAME)
+		return SVC_GARBAGE;
+	if (!xdr_inline_decode(xdr, len))
+		return SVC_GARBAGE;
 
-	svc_getu32(argv);			/* length */
-	svc_getu32(argv);			/* time stamp */
-	slen = XDR_QUADLEN(svc_getnl(argv));	/* machname length */
-	if (slen > 64 || (len -= (slen + 3)*4) < 0)
-		goto badcred;
-	argv->iov_base = (void*)((__be32*)argv->iov_base + slen);	/* skip machname */
-	argv->iov_len -= slen*4;
-
 	/*
 	 * Note: we skip uid_valid()/gid_valid() checks here for
 	 * backwards compatibility with clients that use -1 id's.
@@ -877,27 +930,42 @@ svcauth_unix_accept(struct svc_rqst *rqstp)
 	 */
 	userns = (rqstp->rq_xprt && rqstp->rq_xprt->xpt_cred) ?
 		rqstp->rq_xprt->xpt_cred->user_ns : &init_user_ns;
-	cred->cr_uid = make_kuid(userns, svc_getnl(argv)); /* uid */
-	cred->cr_gid = make_kgid(userns, svc_getnl(argv)); /* gid */
-	slen = svc_getnl(argv);			/* gids length */
-	if (slen > UNX_NGROUPS || (len -= (slen + 2)*4) < 0)
+	if (xdr_stream_decode_u32(xdr, &i) < 0)
+		return SVC_GARBAGE;
+	cred->cr_uid = make_kuid(userns, i);
+	if (xdr_stream_decode_u32(xdr, &i) < 0)
+		return SVC_GARBAGE;
+	cred->cr_gid = make_kgid(userns, i);
+
+	if (xdr_stream_decode_u32(xdr, &len) < 0)
+		return SVC_GARBAGE;
+	if (len > UNX_NGROUPS)
 		goto badcred;
-	cred->cr_group_info = groups_alloc(slen);
+	p = xdr_inline_decode(xdr, XDR_UNIT * len);
+	if (!p)
+		return SVC_GARBAGE;
+	cred->cr_group_info = groups_alloc(len);
 	if (cred->cr_group_info == NULL)
 		return SVC_CLOSE;
-	for (i = 0; i < slen; i++) {
-		kgid_t kgid = make_kgid(userns, svc_getnl(argv));
+	for (i = 0; i < len; i++) {
+		kgid_t kgid = make_kgid(userns, be32_to_cpup(p++));
 		cred->cr_group_info->gid[i] = kgid;
 	}
 	groups_sort(cred->cr_group_info);
-	if (svc_getu32(argv) != htonl(RPC_AUTH_NULL) || svc_getu32(argv) != 0) {
+
+	/* Call's verf field: */
+	if (xdr_stream_decode_opaque_auth(xdr, &flavor, &body, &len) < 0)
+		return SVC_GARBAGE;
+	if (flavor != RPC_AUTH_NULL || len != 0) {
 		rqstp->rq_auth_stat = rpc_autherr_badverf;
 		return SVC_DENIED;
 	}
 
-	/* Put NULL verifier */
-	svc_putnl(resv, RPC_AUTH_NULL);
-	svc_putnl(resv, 0);
+	if (xdr_stream_encode_opaque_auth(&rqstp->rq_res_stream,
+					  RPC_AUTH_NULL, NULL, 0) < 0)
+		return SVC_CLOSE;
+	if (!svcxdr_set_accept_stat(rqstp))
+		return SVC_CLOSE;
 
 	rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX;
 	return SVC_OK;
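
For reference, the credential body that svcauth_unix_accept() parses is the authsys_parms structure from RFC 5531, Appendix A; the machinename and UNX_NGROUPS bounds checks above mirror its XDR definition:

	/*
	 * RFC 5531, Appendix A (for reference only):
	 *
	 *	struct authsys_parms {
	 *		unsigned int	stamp;
	 *		string		machinename<255>;
	 *		unsigned int	uid;
	 *		unsigned int	gid;
	 *		unsigned int	gids<16>;
	 *	};
	 */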
@@ -927,7 +995,7 @@ struct auth_ops svcauth_unix = {
 	.name		= "unix",
 	.owner		= THIS_MODULE,
 	.flavour	= RPC_AUTH_UNIX,
-	.accept 	= svcauth_unix_accept,
+	.accept		= svcauth_unix_accept,
 	.release	= svcauth_unix_release,
 	.domain_release	= svcauth_unix_domain_release,
 	.set_client	= svcauth_unix_set_client,


@@ -508,6 +508,7 @@ static int svc_udp_recvfrom(struct svc_rqst *rqstp)
 	if (serv->sv_stats)
 		serv->sv_stats->netudpcnt++;
 
+	svc_sock_secure_port(rqstp);
 	svc_xprt_received(rqstp->rq_xprt);
 	return len;
@@ -636,7 +637,6 @@ static const struct svc_xprt_ops svc_udp_ops = {
 	.xpo_free = svc_sock_free,
 	.xpo_has_wspace = svc_udp_has_wspace,
 	.xpo_accept = svc_udp_accept,
-	.xpo_secure_port = svc_sock_secure_port,
 	.xpo_kill_temp_xprt = svc_udp_kill_temp_xprt,
 };
@@ -1030,6 +1030,7 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
 	if (serv->sv_stats)
 		serv->sv_stats->nettcpcnt++;
 
+	svc_sock_secure_port(rqstp);
 	svc_xprt_received(rqstp->rq_xprt);
 	return rqstp->rq_arg.len;
@@ -1211,7 +1212,6 @@ static const struct svc_xprt_ops svc_tcp_ops = {
 	.xpo_free = svc_sock_free,
 	.xpo_has_wspace = svc_tcp_has_wspace,
 	.xpo_accept = svc_tcp_accept,
-	.xpo_secure_port = svc_sock_secure_port,
 	.xpo_kill_temp_xprt = svc_tcp_kill_temp_xprt,
 };


@@ -862,13 +862,6 @@ static unsigned int xdr_shrink_pagelen(struct xdr_buf *buf, unsigned int len)
 	return shift;
 }
 
-void
-xdr_shift_buf(struct xdr_buf *buf, size_t len)
-{
-	xdr_shrink_bufhead(buf, buf->head->iov_len - len);
-}
-EXPORT_SYMBOL_GPL(xdr_shift_buf);
-
 /**
  * xdr_stream_pos - Return the current offset from the start of the xdr_stream
  * @xdr: pointer to struct xdr_stream
@@ -1191,6 +1184,21 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
 }
 EXPORT_SYMBOL(xdr_truncate_encode);
 
+/**
+ * xdr_truncate_decode - Truncate a decoding stream
+ * @xdr: pointer to struct xdr_stream
+ * @len: Number of bytes to remove
+ *
+ */
+void xdr_truncate_decode(struct xdr_stream *xdr, size_t len)
+{
+	unsigned int nbytes = xdr_align_size(len);
+
+	xdr->buf->len -= nbytes;
+	xdr->nwords -= XDR_QUADLEN(nbytes);
+}
+EXPORT_SYMBOL_GPL(xdr_truncate_decode);
+
 /**
  * xdr_restrict_buflen - decrease available buffer space
  * @xdr: pointer to xdr_stream
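
One detail worth noting about xdr_truncate_decode(): the byte count is rounded up to a 4-byte XDR boundary before the buffer is shrunk. A minimal sketch with a hypothetical caller:

	/* Sketch (hypothetical caller): trim a trailing integrity
	 * checksum after it has been verified, so that further XDR
	 * decoding cannot run into the checksum bytes. With
	 * trailer_len == 5, xdr_align_size(5) == 8, so buf->len
	 * shrinks by 8 and nwords by 2.
	 */
	static void example_trim_trailer(struct xdr_stream *xdr,
					 unsigned int trailer_len)
	{
		xdr_truncate_decode(xdr, trailer_len);
	}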
@@ -2273,3 +2281,60 @@ ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
 	return ret;
 }
 EXPORT_SYMBOL_GPL(xdr_stream_decode_string_dup);
+
+/**
+ * xdr_stream_decode_opaque_auth - Decode struct opaque_auth (RFC 5531 S8.2)
+ * @xdr: pointer to xdr_stream
+ * @flavor: location to store decoded flavor
+ * @body: location to store decoded body
+ * @body_len: location to store length of decoded body
+ *
+ * Return values:
+ *   On success, returns the number of buffer bytes consumed
+ *   %-EBADMSG on XDR buffer overflow
+ *   %-EMSGSIZE if the decoded size of the body field exceeds 400 octets
+ */
+ssize_t xdr_stream_decode_opaque_auth(struct xdr_stream *xdr, u32 *flavor,
+				      void **body, unsigned int *body_len)
+{
+	ssize_t ret, len;
+
+	len = xdr_stream_decode_u32(xdr, flavor);
+	if (unlikely(len < 0))
+		return len;
+	ret = xdr_stream_decode_opaque_inline(xdr, body, RPC_MAX_AUTH_SIZE);
+	if (unlikely(ret < 0))
+		return ret;
+	*body_len = ret;
+	return len + ret;
+}
+EXPORT_SYMBOL_GPL(xdr_stream_decode_opaque_auth);
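
A sketch of a typical decode-side caller, modeled on the accept methods earlier in this series; the function name is hypothetical:

	/* Sketch: decode a verifier and require it to be an empty
	 * AUTH_NULL, as the svcauth accept methods above do.
	 */
	static int example_require_null_verf(struct svc_rqst *rqstp)
	{
		u32 flavor;
		void *body;
		unsigned int len;

		if (xdr_stream_decode_opaque_auth(&rqstp->rq_arg_stream,
						  &flavor, &body, &len) < 0)
			return SVC_GARBAGE;
		if (flavor != RPC_AUTH_NULL || len != 0)
			return SVC_DENIED;
		return SVC_OK;
	}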
+
+/**
+ * xdr_stream_encode_opaque_auth - Encode struct opaque_auth (RFC 5531 S8.2)
+ * @xdr: pointer to xdr_stream
+ * @flavor: verifier flavor to encode
+ * @body: content of body to encode
+ * @body_len: length of body to encode
+ *
+ * Return values:
+ *   On success, returns length in bytes of XDR buffer consumed
+ *   %-EBADMSG on XDR buffer overflow
+ *   %-EMSGSIZE if the size of @body exceeds 400 octets
+ */
+ssize_t xdr_stream_encode_opaque_auth(struct xdr_stream *xdr, u32 flavor,
+				      void *body, unsigned int body_len)
+{
+	ssize_t ret, len;
+
+	if (unlikely(body_len > RPC_MAX_AUTH_SIZE))
+		return -EMSGSIZE;
+	len = xdr_stream_encode_u32(xdr, flavor);
+	if (unlikely(len < 0))
+		return len;
+	ret = xdr_stream_encode_opaque(xdr, body, body_len);
+	if (unlikely(ret < 0))
+		return ret;
+	return len + ret;
+}
+EXPORT_SYMBOL_GPL(xdr_stream_encode_opaque_auth);
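
And the matching encode-side usage, again with a hypothetical wrapper; bodies longer than RPC_MAX_AUTH_SIZE (400 octets) are rejected with -EMSGSIZE before anything is written to the stream:

	/* Sketch: emit an opaque_auth verifier into a Reply's XDR
	 * stream, treating any encode failure as a transport-fatal
	 * condition, as the accept methods above do.
	 */
	static int example_emit_verf(struct xdr_stream *xdr, u32 flavor,
				     void *body, unsigned int body_len)
	{
		if (xdr_stream_encode_opaque_auth(xdr, flavor,
						  body, body_len) < 0)
			return SVC_CLOSE;
		return SVC_OK;
	}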


@@ -847,6 +847,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
 	rqstp->rq_xprt_ctxt = ctxt;
 	rqstp->rq_prot = IPPROTO_MAX;
 	svc_xprt_copy_addrs(rqstp, xprt);
+	set_bit(RQ_SECURE, &rqstp->rq_flags);
 	return rqstp->rq_arg.len;
 
 out_err:


@@ -73,7 +73,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt);
 static void svc_rdma_detach(struct svc_xprt *xprt);
 static void svc_rdma_free(struct svc_xprt *xprt);
 static int svc_rdma_has_wspace(struct svc_xprt *xprt);
-static void svc_rdma_secure_port(struct svc_rqst *);
 static void svc_rdma_kill_temp_xprt(struct svc_xprt *);
 
 static const struct svc_xprt_ops svc_rdma_ops = {
@@ -86,7 +85,6 @@ static const struct svc_xprt_ops svc_rdma_ops = {
 	.xpo_free = svc_rdma_free,
 	.xpo_has_wspace = svc_rdma_has_wspace,
 	.xpo_accept = svc_rdma_accept,
-	.xpo_secure_port = svc_rdma_secure_port,
 	.xpo_kill_temp_xprt = svc_rdma_kill_temp_xprt,
 };
@@ -600,11 +598,6 @@ static int svc_rdma_has_wspace(struct svc_xprt *xprt)
 	return 1;
 }
 
-static void svc_rdma_secure_port(struct svc_rqst *rqstp)
-{
-	set_bit(RQ_SECURE, &rqstp->rq_flags);
-}
-
 static void svc_rdma_kill_temp_xprt(struct svc_xprt *xprt)
 {
 }