linux-stable/fs/afs/fs_operation.c
David Howells 20325960f8 afs: Reorganise volume and server trees to be rooted on the cell
Reorganise afs_volume objects such that they're in a tree keyed on volume
ID, rooted on an afs_cell object rather than being in multiple trees,
each of which is rooted on an afs_server object.

afs_server structs become per-cell and acquire a pointer to the cell.
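
Schematically, the new relationships look something like the sketch below.
Apart from the struct names, the field names shown (volumes, cell_node, vid,
cell) are illustrative assumptions rather than the patch's exact layout:

	/* Illustrative sketch only: field names are assumptions. */
	struct afs_cell {
		/* ... */
		struct rb_root	volumes;	/* afs_volume objects, keyed by volume ID */
	};

	struct afs_volume {
		/* ... */
		struct afs_cell	*cell;		/* owning cell */
		struct rb_node	cell_node;	/* link in cell->volumes */
		afs_volid_t	vid;		/* tree key */
	};

	struct afs_server {
		/* ... */
		struct afs_cell	*cell;		/* servers are now per-cell */
	};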

The process of breaking a callback then starts with finding the server by
its network address, following that to the cell and then looking up each
volume ID in the volume tree.
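
In outline, that walk might look like the sketch below; the example_*()
helpers are placeholders for illustration, not the patch's real functions:

	/* Illustrative only: the example_*() helpers are placeholders. */
	static void example_break_callback(struct afs_net *net,
					   const struct sockaddr_rxrpc *srx,
					   const struct afs_fid *fid)
	{
		struct afs_server *server;
		struct afs_volume *volume;

		/* (1) Find the server by its network address. */
		server = example_find_server_by_addr(net, srx);
		if (!server)
			return;

		/* (2) Follow the server to its cell and (3) look the volume
		 * up by ID in the cell's volume tree.
		 */
		volume = example_lookup_volume(server->cell, fid->vid);
		if (volume)
			example_break_callbacks_on_volume(volume, fid);
	}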

This is simpler than the afs_vol_interest/afs_cb_interest N:M mapping web
and allows those structs and the code for maintaining them to be simplified
or removed.

It does make a couple of things a bit more tricky, though:

 (1) Operations now start with a volume, not a server, so there can be more
     than one answer as to whether or not the server we'll end up using
     supports the FS.InlineBulkStatus RPC (see the sketch after this list).

 (2) CB RPC operations that specify the server UUID.  There's still a tree
     of servers by UUID on the afs_net struct, but the UUIDs in it aren't
     guaranteed unique.
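
For (1), this means the decision has to be made against the server actually
chosen rather than at operation setup, e.g. by testing a per-server capability
bit once afs_select_fileserver() has picked a server.  A rough sketch, in
which the flag name is an assumption:

	/* Sketch: whether FS.InlineBulkStatus can be used is a property of
	 * the server actually chosen, so test it after server selection.
	 * The flag name is an assumption, not necessarily what the patch uses.
	 */
	static bool example_can_use_inline_bulk_status(struct afs_operation *op)
	{
		return !test_bit(AFS_SERVER_FL_NO_IBULK, &op->server->flags);
	}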

Signed-off-by: David Howells <dhowells@redhat.com>
2020-06-04 15:37:57 +01:00

// SPDX-License-Identifier: GPL-2.0-or-later
/* Fileserver-directed operation handling.
 *
 * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include "internal.h"

static atomic_t afs_operation_debug_counter;
/*
 * Create an operation against a volume.
 */
struct afs_operation *afs_alloc_operation(struct key *key, struct afs_volume *volume)
{
	struct afs_operation *op;

	_enter("");

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op)
		return ERR_PTR(-ENOMEM);

	if (!key) {
		key = afs_request_key(volume->cell);
		if (IS_ERR(key)) {
			kfree(op);
			return ERR_CAST(key);
		}
	} else {
		key_get(key);
	}

	op->key = key;
	op->volume = afs_get_volume(volume, afs_volume_trace_get_new_op);
	op->net = volume->cell->net;
	op->cb_v_break = volume->cb_v_break;
	op->debug_id = atomic_inc_return(&afs_operation_debug_counter);
	op->error = -EDESTADDRREQ;
	op->ac.error = SHRT_MAX;

	_leave(" = [op=%08x]", op->debug_id);
	return op;
}
/*
 * Lock the vnode(s) being operated upon.
 */
static bool afs_get_io_locks(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;
	struct afs_vnode *vnode2 = op->file[1].vnode;

	_enter("");

	if (op->flags & AFS_OPERATION_UNINTR) {
		mutex_lock(&vnode->io_lock);
		op->flags |= AFS_OPERATION_LOCK_0;
		_leave(" = t [1]");
		return true;
	}

	if (!vnode2 || !op->file[1].need_io_lock || vnode == vnode2)
		vnode2 = NULL;

	if (vnode2 > vnode)
		swap(vnode, vnode2);

	if (mutex_lock_interruptible(&vnode->io_lock) < 0) {
		op->error = -EINTR;
		op->flags |= AFS_OPERATION_STOP;
		_leave(" = f [I 0]");
		return false;
	}
	op->flags |= AFS_OPERATION_LOCK_0;

	if (vnode2) {
		if (mutex_lock_interruptible_nested(&vnode2->io_lock, 1) < 0) {
			op->error = -EINTR;
			op->flags |= AFS_OPERATION_STOP;
			mutex_unlock(&vnode->io_lock);
			op->flags &= ~AFS_OPERATION_LOCK_0;
			_leave(" = f [I 1]");
			return false;
		}
		op->flags |= AFS_OPERATION_LOCK_1;
	}

	_leave(" = t [2]");
	return true;
}
/*
 * Drop the I/O locks taken by afs_get_io_locks().
 */
static void afs_drop_io_locks(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;
	struct afs_vnode *vnode2 = op->file[1].vnode;

	_enter("");

	if (op->flags & AFS_OPERATION_LOCK_1)
		mutex_unlock(&vnode2->io_lock);
	if (op->flags & AFS_OPERATION_LOCK_0)
		mutex_unlock(&vnode->io_lock);
}
/*
 * Record the pre-operation state of a vnode that the operation will touch.
 */
static void afs_prepare_vnode(struct afs_operation *op, struct afs_vnode_param *vp,
			      unsigned int index)
{
	struct afs_vnode *vnode = vp->vnode;

	if (vnode) {
		vp->fid = vnode->fid;
		vp->dv_before = vnode->status.data_version;
		vp->cb_break_before = afs_calc_vnode_cb_break(vnode);
		if (vnode->lock_state != AFS_VNODE_LOCK_NONE)
			op->flags |= AFS_OPERATION_CUR_ONLY;
	}

	if (vp->fid.vnode)
		_debug("PREP[%u] {%llx:%llu.%u}",
		       index, vp->fid.vid, vp->fid.vnode, vp->fid.unique);
}
/*
 * Begin an operation on the fileserver.
 *
 * Fileserver operations are serialised on the server by vnode, so we serialise
 * them here also using the io_lock.
 */
bool afs_begin_vnode_operation(struct afs_operation *op)
{
	struct afs_vnode *vnode = op->file[0].vnode;

	ASSERT(vnode);

	_enter("");

	if (op->file[0].need_io_lock)
		if (!afs_get_io_locks(op))
			return false;

	afs_prepare_vnode(op, &op->file[0], 0);
	afs_prepare_vnode(op, &op->file[1], 1);
	op->cb_v_break = op->volume->cb_v_break;
	_leave(" = true");
	return true;
}
/*
 * Tidy up a filesystem cursor and unlock the vnode.
 */
static void afs_end_vnode_operation(struct afs_operation *op)
{
	_enter("");

	if (op->error == -EDESTADDRREQ ||
	    op->error == -EADDRNOTAVAIL ||
	    op->error == -ENETUNREACH ||
	    op->error == -EHOSTUNREACH)
		afs_dump_edestaddrreq(op);

	afs_drop_io_locks(op);

	if (op->error == -ECONNABORTED)
		op->error = afs_abort_to_error(op->ac.abort_code);
}
/*
 * Wait for an in-progress operation to complete.
 */
void afs_wait_for_operation(struct afs_operation *op)
{
	_enter("");

	while (afs_select_fileserver(op)) {
		op->cb_s_break = op->server->cb_s_break;
		if (test_bit(AFS_SERVER_FL_IS_YFS, &op->server->flags) &&
		    op->ops->issue_yfs_rpc)
			op->ops->issue_yfs_rpc(op);
		else
			op->ops->issue_afs_rpc(op);

		op->error = afs_wait_for_call_to_complete(op->call, &op->ac);
	}

	if (op->error == 0) {
		_debug("success");
		op->ops->success(op);
	}

	afs_end_vnode_operation(op);

	if (op->error == 0 && op->ops->edit_dir) {
		_debug("edit_dir");
		op->ops->edit_dir(op);
	}
	_leave("");
}
/*
 * Dispose of an operation.
 */
int afs_put_operation(struct afs_operation *op)
{
	int i, ret = op->error;

	_enter("op=%08x,%d", op->debug_id, ret);

	if (op->ops && op->ops->put)
		op->ops->put(op);

	if (op->file[0].put_vnode)
		iput(&op->file[0].vnode->vfs_inode);
	if (op->file[1].put_vnode)
		iput(&op->file[1].vnode->vfs_inode);

	if (op->more_files) {
		for (i = 0; i < op->nr_files - 2; i++)
			if (op->more_files[i].put_vnode)
				iput(&op->more_files[i].vnode->vfs_inode);
		kfree(op->more_files);
	}

	afs_end_cursor(&op->ac);
	afs_put_serverlist(op->net, op->server_list);
	afs_put_volume(op->net, op->volume, afs_volume_trace_put_put_op);
	kfree(op);
	return ret;
}
/*
 * Begin, wait for and dispose of a synchronous operation in one go.
 */
int afs_do_sync_operation(struct afs_operation *op)
{
	afs_begin_vnode_operation(op);
	afs_wait_for_operation(op);
	return afs_put_operation(op);
}
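
Put together, a caller elsewhere in fs/afs is expected to drive an operation
through this file roughly as in the sketch below.  afs_op_set_vnode() and the
afs_fetch_status_operation ops table are assumed from elsewhere in fs/afs and
are not defined in this file:

/* Sketch of typical usage of the API above.  afs_op_set_vnode() and the
 * afs_fetch_status_operation ops table are assumptions taken from elsewhere
 * in fs/afs; they are not part of this file.
 */
static int example_fetch_status(struct afs_vnode *vnode, struct key *key)
{
	struct afs_operation *op;

	op = afs_alloc_operation(key, vnode->volume);
	if (IS_ERR(op))
		return PTR_ERR(op);

	afs_op_set_vnode(op, 0, vnode);		/* file[0] is the vnode operated on */
	op->ops = &afs_fetch_status_operation;

	/* Lock the vnode, select a server, issue and wait for the RPC, then
	 * clean up and return the result.
	 */
	return afs_do_sync_operation(op);
}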