Merge tag 'ceph-for-6.7-rc1' of https://github.com/ceph/ceph-client

Pull ceph updates from Ilya Dryomov:

 - support for idmapped mounts in CephFS (Christian Brauner, Alexander
   Mikhalitsyn).

   The series was originally developed by Christian and later picked up
   and brought over the finish line by Alexander, who also contributed
   an enabler on the MDS side (separate owner_{u,g}id fields on the
   wire).

   The required exports for mnt_idmap_{get,put}() in VFS have been acked
   by Christian and received no objection from Christoph.  (A userspace
   sketch of setting up such an idmapped mount follows this list.)

 - a churny change in CephFS logging to include cluster and client
   identifiers in log and debug messages (Xiubo Li).

   This would help in scenarios with dozens of CephFS mounts on the same
   node, which are getting increasingly common, especially in the
   Kubernetes world.  (A small sketch of the prefixing idea also follows
   this list.)
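
A minimal userspace sketch of the first item, to make the idmapped
mount support concrete: a detached copy of an existing CephFS mount is
given an ID mapping with mount_setattr(2) and then attached at a new
location.  The mount paths and the pid in the user-namespace path are
placeholders, and the enable_unsafe_idmap module parameter mentioned in
the patch titles below (apparently needed when the MDS lacks the new
owner_{u,g}id fields) is not shown here.

  /* Sketch only: requires CAP_SYS_ADMIN and mount_setattr(2) support. */
  #define _GNU_SOURCE
  #include <fcntl.h>
  #include <linux/mount.h>
  #include <stdio.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  int main(void)
  {
          /* Detached copy of the existing CephFS mount (placeholder path). */
          int tree_fd = syscall(SYS_open_tree, AT_FDCWD, "/mnt/cephfs",
                                OPEN_TREE_CLONE | OPEN_TREE_CLOEXEC);
          if (tree_fd < 0) {
                  perror("open_tree");
                  return 1;
          }

          /*
           * Placeholder: an fd for the user namespace that defines the
           * ID mapping, e.g. of an unprivileged container's init task.
           */
          int userns_fd = open("/proc/1234/ns/user", O_RDONLY | O_CLOEXEC);
          if (userns_fd < 0) {
                  perror("open userns");
                  return 1;
          }

          struct mount_attr attr = {
                  .attr_set = MOUNT_ATTR_IDMAP,
                  .userns_fd = userns_fd,
          };
          if (syscall(SYS_mount_setattr, tree_fd, "", AT_EMPTY_PATH,
                      &attr, sizeof(attr)) < 0) {
                  perror("mount_setattr");
                  return 1;
          }

          /* Attach the idmapped copy at a new (placeholder) location. */
          if (syscall(SYS_move_mount, tree_fd, "", AT_FDCWD,
                      "/mnt/cephfs-idmapped", MOVE_MOUNT_F_EMPTY_PATH) < 0) {
                  perror("move_mount");
                  return 1;
          }
          return 0;
  }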
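
And a small illustrative sketch of the second item (plain userspace C,
not the kernel macro itself): every message is prefixed with the
cluster fsid and the client's global_id so that log lines from
different CephFS mounts on one node can be told apart.  The struct,
field names, prefix format and example values are invented for
illustration; the real doutc()/pr_*_client() helpers added by the
series take a struct ceph_client, as the diff below shows.

  #include <stdint.h>
  #include <stdio.h>

  struct demo_client {
          char fsid[37];          /* cluster fsid as a UUID string */
          uint64_t global_id;     /* this client instance's global id */
  };

  /* Prefix every debug line with "[fsid global_id] function:". */
  #define doutc(cl, fmt, ...) \
          fprintf(stderr, "[%s %llu] %s: " fmt, (cl)->fsid, \
                  (unsigned long long)(cl)->global_id, __func__, \
                  ##__VA_ARGS__)

  static void dirty_folio_demo(struct demo_client *cl)
  {
          doutc(cl, "folio idx %lu -- already dirty\n", 42UL);
  }

  int main(void)
  {
          struct demo_client cl = {
                  .fsid = "3fd1fc51-73a9-4b48-9b25-d344c0e2f4a6",
                  .global_id = 4161,
          };
          dirty_folio_demo(&cl);
          return 0;
  }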

* tag 'ceph-for-6.7-rc1' of https://github.com/ceph/ceph-client:
  ceph: allow idmapped mounts
  ceph: allow idmapped atomic_open inode op
  ceph: allow idmapped set_acl inode op
  ceph: allow idmapped setattr inode op
  ceph: pass idmap to __ceph_setattr
  ceph: allow idmapped permission inode op
  ceph: allow idmapped getattr inode op
  ceph: pass an idmapping to mknod/symlink/mkdir
  ceph: add enable_unsafe_idmap module parameter
  ceph: handle idmapped mounts in create_request_message()
  ceph: stash idmapping in mdsc request
  fs: export mnt_idmap_get/mnt_idmap_put
  libceph, ceph: move mdsmap.h to fs/ceph
  ceph: print cluster fsid and client global_id in all debug logs
  ceph: rename _to_client() to _to_fs_client()
  ceph: pass the mdsc to several helpers
  libceph: add doutc and *_client debug macros support
Linus Torvalds 2023-11-10 09:52:56 -08:00
commit e21165bfbc
26 changed files with 2061 additions and 1464 deletions

@ -15,6 +15,7 @@
#include <linux/slab.h>
#include "super.h"
#include "mds_client.h"
static inline void ceph_set_cached_acl(struct inode *inode,
int type, struct posix_acl *acl)
@ -31,6 +32,7 @@ static inline void ceph_set_cached_acl(struct inode *inode,
struct posix_acl *ceph_get_acl(struct inode *inode, int type, bool rcu)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
int size;
unsigned int retry_cnt = 0;
const char *name;
@ -72,8 +74,8 @@ retry:
} else if (size == -ENODATA || size == 0) {
acl = NULL;
} else {
pr_err_ratelimited("get acl %llx.%llx failed, err=%d\n",
ceph_vinop(inode), size);
pr_err_ratelimited_client(cl, "%llx.%llx failed, err=%d\n",
ceph_vinop(inode), size);
acl = ERR_PTR(-EIO);
}
@ -105,7 +107,7 @@ int ceph_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
ret = posix_acl_update_mode(&nop_mnt_idmap, inode,
ret = posix_acl_update_mode(idmap, inode,
&new_mode, &acl);
if (ret)
goto out;
@ -140,7 +142,7 @@ int ceph_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
newattrs.ia_ctime = current_time(inode);
newattrs.ia_mode = new_mode;
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
ret = __ceph_setattr(inode, &newattrs, NULL);
ret = __ceph_setattr(idmap, inode, &newattrs, NULL);
if (ret)
goto out_free;
}
@ -151,7 +153,7 @@ int ceph_set_acl(struct mnt_idmap *idmap, struct dentry *dentry,
newattrs.ia_ctime = old_ctime;
newattrs.ia_mode = old_mode;
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
__ceph_setattr(inode, &newattrs, NULL);
__ceph_setattr(idmap, inode, &newattrs, NULL);
}
goto out_free;
}

@ -79,18 +79,18 @@ static inline struct ceph_snap_context *page_snap_context(struct page *page)
*/
static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
{
struct inode *inode;
struct inode *inode = mapping->host;
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci;
struct ceph_snap_context *snapc;
if (folio_test_dirty(folio)) {
dout("%p dirty_folio %p idx %lu -- already dirty\n",
mapping->host, folio, folio->index);
doutc(cl, "%llx.%llx %p idx %lu -- already dirty\n",
ceph_vinop(inode), folio, folio->index);
VM_BUG_ON_FOLIO(!folio_test_private(folio), folio);
return false;
}
inode = mapping->host;
ci = ceph_inode(inode);
/* dirty the head */
@ -111,12 +111,12 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
if (ci->i_wrbuffer_ref == 0)
ihold(inode);
++ci->i_wrbuffer_ref;
dout("%p dirty_folio %p idx %lu head %d/%d -> %d/%d "
"snapc %p seq %lld (%d snaps)\n",
mapping->host, folio, folio->index,
ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
snapc, snapc->seq, snapc->num_snaps);
doutc(cl, "%llx.%llx %p idx %lu head %d/%d -> %d/%d "
"snapc %p seq %lld (%d snaps)\n",
ceph_vinop(inode), folio, folio->index,
ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref_head-1,
ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
snapc, snapc->seq, snapc->num_snaps);
spin_unlock(&ci->i_ceph_lock);
/*
@ -137,23 +137,22 @@ static bool ceph_dirty_folio(struct address_space *mapping, struct folio *folio)
static void ceph_invalidate_folio(struct folio *folio, size_t offset,
size_t length)
{
struct inode *inode;
struct ceph_inode_info *ci;
struct inode *inode = folio->mapping->host;
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_snap_context *snapc;
inode = folio->mapping->host;
ci = ceph_inode(inode);
if (offset != 0 || length != folio_size(folio)) {
dout("%p invalidate_folio idx %lu partial dirty page %zu~%zu\n",
inode, folio->index, offset, length);
doutc(cl, "%llx.%llx idx %lu partial dirty page %zu~%zu\n",
ceph_vinop(inode), folio->index, offset, length);
return;
}
WARN_ON(!folio_test_locked(folio));
if (folio_test_private(folio)) {
dout("%p invalidate_folio idx %lu full dirty page\n",
inode, folio->index);
doutc(cl, "%llx.%llx idx %lu full dirty page\n",
ceph_vinop(inode), folio->index);
snapc = folio_detach_private(folio);
ceph_put_wrbuffer_cap_refs(ci, 1, snapc);
@ -166,10 +165,10 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
{
struct inode *inode = folio->mapping->host;
struct ceph_client *cl = ceph_inode_to_client(inode);
dout("%llx:%llx release_folio idx %lu (%sdirty)\n",
ceph_vinop(inode),
folio->index, folio_test_dirty(folio) ? "" : "not ");
doutc(cl, "%llx.%llx idx %lu (%sdirty)\n", ceph_vinop(inode),
folio->index, folio_test_dirty(folio) ? "" : "not ");
if (folio_test_private(folio))
return false;
@ -229,7 +228,7 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
{
struct inode *inode = subreq->rreq->inode;
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
u64 objno, objoff;
u32 xlen;
@ -244,7 +243,8 @@ static bool ceph_netfs_clamp_length(struct netfs_io_subrequest *subreq)
static void finish_netfs_read(struct ceph_osd_request *req)
{
struct inode *inode = req->r_inode;
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
struct netfs_io_subrequest *subreq = req->r_priv;
struct ceph_osd_req_op *op = &req->r_ops[0];
@ -254,8 +254,8 @@ static void finish_netfs_read(struct ceph_osd_request *req)
ceph_update_read_metrics(&fsc->mdsc->metric, req->r_start_latency,
req->r_end_latency, osd_data->length, err);
dout("%s: result %d subreq->len=%zu i_size=%lld\n", __func__, req->r_result,
subreq->len, i_size_read(req->r_inode));
doutc(cl, "result %d subreq->len=%zu i_size=%lld\n", req->r_result,
subreq->len, i_size_read(req->r_inode));
/* no object means success but no data */
if (err == -ENOENT)
@ -348,7 +348,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
struct netfs_io_request *rreq = subreq->rreq;
struct inode *inode = rreq->inode;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
struct ceph_osd_request *req = NULL;
struct ceph_vino vino = ceph_vino(inode);
struct iov_iter iter;
@ -383,7 +384,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
goto out;
}
dout("%s: pos=%llu orig_len=%zu len=%llu\n", __func__, subreq->start, subreq->len, len);
doutc(cl, "%llx.%llx pos=%llu orig_len=%zu len=%llu\n",
ceph_vinop(inode), subreq->start, subreq->len, len);
iov_iter_xarray(&iter, ITER_DEST, &rreq->mapping->i_pages, subreq->start, len);
@ -400,8 +402,8 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
err = iov_iter_get_pages_alloc2(&iter, &pages, len, &page_off);
if (err < 0) {
dout("%s: iov_ter_get_pages_alloc returned %d\n",
__func__, err);
doutc(cl, "%llx.%llx failed to allocate pages, %d\n",
ceph_vinop(inode), err);
goto out;
}
@ -429,12 +431,13 @@ out:
ceph_osdc_put_request(req);
if (err)
netfs_subreq_terminated(subreq, err, false);
dout("%s: result %d\n", __func__, err);
doutc(cl, "%llx.%llx result %d\n", ceph_vinop(inode), err);
}
static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
{
struct inode *inode = rreq->inode;
struct ceph_client *cl = ceph_inode_to_client(inode);
int got = 0, want = CEPH_CAP_FILE_CACHE;
struct ceph_netfs_request_data *priv;
int ret = 0;
@ -466,12 +469,12 @@ static int ceph_init_request(struct netfs_io_request *rreq, struct file *file)
*/
ret = ceph_try_get_caps(inode, CEPH_CAP_FILE_RD, want, true, &got);
if (ret < 0) {
dout("start_read %p, error getting cap\n", inode);
doutc(cl, "%llx.%llx, error getting cap\n", ceph_vinop(inode));
goto out;
}
if (!(got & want)) {
dout("start_read %p, no cache cap\n", inode);
doutc(cl, "%llx.%llx, no cache cap\n", ceph_vinop(inode));
ret = -EACCES;
goto out;
}
@ -563,13 +566,14 @@ get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
struct ceph_snap_context *page_snapc)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_snap_context *snapc = NULL;
struct ceph_cap_snap *capsnap = NULL;
spin_lock(&ci->i_ceph_lock);
list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
dout(" cap_snap %p snapc %p has %d dirty pages\n", capsnap,
capsnap->context, capsnap->dirty_pages);
doutc(cl, " capsnap %p snapc %p has %d dirty pages\n",
capsnap, capsnap->context, capsnap->dirty_pages);
if (!capsnap->dirty_pages)
continue;
@ -601,8 +605,8 @@ get_oldest_context(struct inode *inode, struct ceph_writeback_ctl *ctl,
}
if (!snapc && ci->i_wrbuffer_ref_head) {
snapc = ceph_get_snap_context(ci->i_head_snapc);
dout(" head snapc %p has %d dirty pages\n",
snapc, ci->i_wrbuffer_ref_head);
doutc(cl, " head snapc %p has %d dirty pages\n", snapc,
ci->i_wrbuffer_ref_head);
if (ctl) {
ctl->i_size = i_size_read(inode);
ctl->truncate_size = ci->i_truncate_size;
@ -658,7 +662,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
struct folio *folio = page_folio(page);
struct inode *inode = page->mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
struct ceph_snap_context *snapc, *oldest;
loff_t page_off = page_offset(page);
int err;
@ -670,7 +675,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
bool caching = ceph_is_cache_enabled(inode);
struct page *bounce_page = NULL;
dout("writepage %p idx %lu\n", page, page->index);
doutc(cl, "%llx.%llx page %p idx %lu\n", ceph_vinop(inode), page,
page->index);
if (ceph_inode_is_shutdown(inode))
return -EIO;
@ -678,13 +684,14 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
/* verify this is a writeable snap context */
snapc = page_snap_context(page);
if (!snapc) {
dout("writepage %p page %p not dirty?\n", inode, page);
doutc(cl, "%llx.%llx page %p not dirty?\n", ceph_vinop(inode),
page);
return 0;
}
oldest = get_oldest_context(inode, &ceph_wbc, snapc);
if (snapc->seq > oldest->seq) {
dout("writepage %p page %p snapc %p not writeable - noop\n",
inode, page, snapc);
doutc(cl, "%llx.%llx page %p snapc %p not writeable - noop\n",
ceph_vinop(inode), page, snapc);
/* we should only noop if called by kswapd */
WARN_ON(!(current->flags & PF_MEMALLOC));
ceph_put_snap_context(oldest);
@ -695,8 +702,8 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
/* is this a partial page at end of file? */
if (page_off >= ceph_wbc.i_size) {
dout("folio at %lu beyond eof %llu\n", folio->index,
ceph_wbc.i_size);
doutc(cl, "%llx.%llx folio at %lu beyond eof %llu\n",
ceph_vinop(inode), folio->index, ceph_wbc.i_size);
folio_invalidate(folio, 0, folio_size(folio));
return 0;
}
@ -705,8 +712,9 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
len = ceph_wbc.i_size - page_off;
wlen = IS_ENCRYPTED(inode) ? round_up(len, CEPH_FSCRYPT_BLOCK_SIZE) : len;
dout("writepage %p page %p index %lu on %llu~%llu snapc %p seq %lld\n",
inode, page, page->index, page_off, wlen, snapc, snapc->seq);
doutc(cl, "%llx.%llx page %p index %lu on %llu~%llu snapc %p seq %lld\n",
ceph_vinop(inode), page, page->index, page_off, wlen, snapc,
snapc->seq);
if (atomic_long_inc_return(&fsc->writeback_count) >
CONGESTION_ON_THRESH(fsc->mount_options->congestion_kb))
@ -747,8 +755,9 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
osd_req_op_extent_osd_data_pages(req, 0,
bounce_page ? &bounce_page : &page, wlen, 0,
false, false);
dout("writepage %llu~%llu (%llu bytes, %sencrypted)\n",
page_off, len, wlen, IS_ENCRYPTED(inode) ? "" : "not ");
doutc(cl, "%llx.%llx %llu~%llu (%llu bytes, %sencrypted)\n",
ceph_vinop(inode), page_off, len, wlen,
IS_ENCRYPTED(inode) ? "" : "not ");
req->r_mtime = inode_get_mtime(inode);
ceph_osdc_start_request(osdc, req);
@ -767,19 +776,21 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
wbc = &tmp_wbc;
if (err == -ERESTARTSYS) {
/* killed by SIGKILL */
dout("writepage interrupted page %p\n", page);
doutc(cl, "%llx.%llx interrupted page %p\n",
ceph_vinop(inode), page);
redirty_page_for_writepage(wbc, page);
end_page_writeback(page);
return err;
}
if (err == -EBLOCKLISTED)
fsc->blocklisted = true;
dout("writepage setting page/mapping error %d %p\n",
err, page);
doutc(cl, "%llx.%llx setting page/mapping error %d %p\n",
ceph_vinop(inode), err, page);
mapping_set_error(&inode->i_data, err);
wbc->pages_skipped++;
} else {
dout("writepage cleaned page %p\n", page);
doutc(cl, "%llx.%llx cleaned page %p\n",
ceph_vinop(inode), page);
err = 0; /* vfs expects us to return 0 */
}
oldest = detach_page_private(page);
@ -803,7 +814,7 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
ihold(inode);
if (wbc->sync_mode == WB_SYNC_NONE &&
ceph_inode_to_client(inode)->write_congested)
ceph_inode_to_fs_client(inode)->write_congested)
return AOP_WRITEPAGE_ACTIVATE;
wait_on_page_fscache(page);
@ -829,6 +840,7 @@ static void writepages_finish(struct ceph_osd_request *req)
{
struct inode *inode = req->r_inode;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_osd_data *osd_data;
struct page *page;
int num_pages, total_pages = 0;
@ -836,11 +848,11 @@ static void writepages_finish(struct ceph_osd_request *req)
int rc = req->r_result;
struct ceph_snap_context *snapc = req->r_snapc;
struct address_space *mapping = inode->i_mapping;
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
unsigned int len = 0;
bool remove_page;
dout("writepages_finish %p rc %d\n", inode, rc);
doutc(cl, "%llx.%llx rc %d\n", ceph_vinop(inode), rc);
if (rc < 0) {
mapping_set_error(mapping, rc);
ceph_set_error_write(ci);
@ -862,8 +874,10 @@ static void writepages_finish(struct ceph_osd_request *req)
/* clean all pages */
for (i = 0; i < req->r_num_ops; i++) {
if (req->r_ops[i].op != CEPH_OSD_OP_WRITE) {
pr_warn("%s incorrect op %d req %p index %d tid %llu\n",
__func__, req->r_ops[i].op, req, i, req->r_tid);
pr_warn_client(cl,
"%llx.%llx incorrect op %d req %p index %d tid %llu\n",
ceph_vinop(inode), req->r_ops[i].op, req, i,
req->r_tid);
break;
}
@ -890,7 +904,7 @@ static void writepages_finish(struct ceph_osd_request *req)
ceph_put_snap_context(detach_page_private(page));
end_page_writeback(page);
dout("unlocking %p\n", page);
doutc(cl, "unlocking %p\n", page);
if (remove_page)
generic_error_remove_page(inode->i_mapping,
@ -898,8 +912,9 @@ static void writepages_finish(struct ceph_osd_request *req)
unlock_page(page);
}
dout("writepages_finish %p wrote %llu bytes cleaned %d pages\n",
inode, osd_data->length, rc >= 0 ? num_pages : 0);
doutc(cl, "%llx.%llx wrote %llu bytes cleaned %d pages\n",
ceph_vinop(inode), osd_data->length,
rc >= 0 ? num_pages : 0);
release_pages(osd_data->pages, num_pages);
}
@ -926,7 +941,8 @@ static int ceph_writepages_start(struct address_space *mapping,
{
struct inode *inode = mapping->host;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
struct ceph_vino vino = ceph_vino(inode);
pgoff_t index, start_index, end = -1;
struct ceph_snap_context *snapc = NULL, *last_snapc = NULL, *pgsnapc;
@ -944,15 +960,15 @@ static int ceph_writepages_start(struct address_space *mapping,
fsc->write_congested)
return 0;
dout("writepages_start %p (mode=%s)\n", inode,
wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
(wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
doutc(cl, "%llx.%llx (mode=%s)\n", ceph_vinop(inode),
wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
(wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
if (ceph_inode_is_shutdown(inode)) {
if (ci->i_wrbuffer_ref > 0) {
pr_warn_ratelimited(
"writepage_start %p %lld forced umount\n",
inode, ceph_ino(inode));
pr_warn_ratelimited_client(cl,
"%llx.%llx %lld forced umount\n",
ceph_vinop(inode), ceph_ino(inode));
}
mapping_set_error(mapping, -EIO);
return -EIO; /* we're in a forced umount, don't write! */
@ -976,11 +992,11 @@ retry:
if (!snapc) {
/* hmm, why does writepages get called when there
is no dirty data? */
dout(" no snap context with dirty data?\n");
doutc(cl, " no snap context with dirty data?\n");
goto out;
}
dout(" oldest snapc is %p seq %lld (%d snaps)\n",
snapc, snapc->seq, snapc->num_snaps);
doutc(cl, " oldest snapc is %p seq %lld (%d snaps)\n", snapc,
snapc->seq, snapc->num_snaps);
should_loop = false;
if (ceph_wbc.head_snapc && snapc != last_snapc) {
@ -990,13 +1006,13 @@ retry:
end = -1;
if (index > 0)
should_loop = true;
dout(" cyclic, start at %lu\n", index);
doutc(cl, " cyclic, start at %lu\n", index);
} else {
index = wbc->range_start >> PAGE_SHIFT;
end = wbc->range_end >> PAGE_SHIFT;
if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
range_whole = true;
dout(" not cyclic, %lu to %lu\n", index, end);
doutc(cl, " not cyclic, %lu to %lu\n", index, end);
}
} else if (!ceph_wbc.head_snapc) {
/* Do not respect wbc->range_{start,end}. Dirty pages
@ -1005,7 +1021,7 @@ retry:
* associated with 'snapc' get written */
if (index > 0)
should_loop = true;
dout(" non-head snapc, range whole\n");
doutc(cl, " non-head snapc, range whole\n");
}
if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
@ -1028,12 +1044,12 @@ retry:
get_more_pages:
nr_folios = filemap_get_folios_tag(mapping, &index,
end, tag, &fbatch);
dout("pagevec_lookup_range_tag got %d\n", nr_folios);
doutc(cl, "pagevec_lookup_range_tag got %d\n", nr_folios);
if (!nr_folios && !locked_pages)
break;
for (i = 0; i < nr_folios && locked_pages < max_pages; i++) {
page = &fbatch.folios[i]->page;
dout("? %p idx %lu\n", page, page->index);
doutc(cl, "? %p idx %lu\n", page, page->index);
if (locked_pages == 0)
lock_page(page); /* first page */
else if (!trylock_page(page))
@ -1042,15 +1058,15 @@ get_more_pages:
/* only dirty pages, or our accounting breaks */
if (unlikely(!PageDirty(page)) ||
unlikely(page->mapping != mapping)) {
dout("!dirty or !mapping %p\n", page);
doutc(cl, "!dirty or !mapping %p\n", page);
unlock_page(page);
continue;
}
/* only if matching snap context */
pgsnapc = page_snap_context(page);
if (pgsnapc != snapc) {
dout("page snapc %p %lld != oldest %p %lld\n",
pgsnapc, pgsnapc->seq, snapc, snapc->seq);
doutc(cl, "page snapc %p %lld != oldest %p %lld\n",
pgsnapc, pgsnapc->seq, snapc, snapc->seq);
if (!should_loop &&
!ceph_wbc.head_snapc &&
wbc->sync_mode != WB_SYNC_NONE)
@ -1061,8 +1077,8 @@ get_more_pages:
if (page_offset(page) >= ceph_wbc.i_size) {
struct folio *folio = page_folio(page);
dout("folio at %lu beyond eof %llu\n",
folio->index, ceph_wbc.i_size);
doutc(cl, "folio at %lu beyond eof %llu\n",
folio->index, ceph_wbc.i_size);
if ((ceph_wbc.size_stable ||
folio_pos(folio) >= i_size_read(inode)) &&
folio_clear_dirty_for_io(folio))
@ -1072,23 +1088,23 @@ get_more_pages:
continue;
}
if (strip_unit_end && (page->index > strip_unit_end)) {
dout("end of strip unit %p\n", page);
doutc(cl, "end of strip unit %p\n", page);
unlock_page(page);
break;
}
if (PageWriteback(page) || PageFsCache(page)) {
if (wbc->sync_mode == WB_SYNC_NONE) {
dout("%p under writeback\n", page);
doutc(cl, "%p under writeback\n", page);
unlock_page(page);
continue;
}
dout("waiting on writeback %p\n", page);
doutc(cl, "waiting on writeback %p\n", page);
wait_on_page_writeback(page);
wait_on_page_fscache(page);
}
if (!clear_page_dirty_for_io(page)) {
dout("%p !clear_page_dirty_for_io\n", page);
doutc(cl, "%p !clear_page_dirty_for_io\n", page);
unlock_page(page);
continue;
}
@ -1143,8 +1159,8 @@ get_more_pages:
}
/* note position of first page in fbatch */
dout("%p will write page %p idx %lu\n",
inode, page, page->index);
doutc(cl, "%llx.%llx will write page %p idx %lu\n",
ceph_vinop(inode), page, page->index);
if (atomic_long_inc_return(&fsc->writeback_count) >
CONGESTION_ON_THRESH(
@ -1158,8 +1174,9 @@ get_more_pages:
locked_pages ? GFP_NOWAIT : GFP_NOFS);
if (IS_ERR(pages[locked_pages])) {
if (PTR_ERR(pages[locked_pages]) == -EINVAL)
pr_err("%s: inode->i_blkbits=%hhu\n",
__func__, inode->i_blkbits);
pr_err_client(cl,
"inode->i_blkbits=%hhu\n",
inode->i_blkbits);
/* better not fail on first page! */
BUG_ON(locked_pages == 0);
pages[locked_pages] = NULL;
@ -1193,7 +1210,7 @@ get_more_pages:
if (nr_folios && i == nr_folios &&
locked_pages < max_pages) {
dout("reached end fbatch, trying for more\n");
doutc(cl, "reached end fbatch, trying for more\n");
folio_batch_release(&fbatch);
goto get_more_pages;
}
@ -1254,8 +1271,8 @@ new_request:
/* Start a new extent */
osd_req_op_extent_dup_last(req, op_idx,
cur_offset - offset);
dout("writepages got pages at %llu~%llu\n",
offset, len);
doutc(cl, "got pages at %llu~%llu\n", offset,
len);
osd_req_op_extent_osd_data_pages(req, op_idx,
data_pages, len, 0,
from_pool, false);
@ -1288,12 +1305,13 @@ new_request:
if (IS_ENCRYPTED(inode))
len = round_up(len, CEPH_FSCRYPT_BLOCK_SIZE);
dout("writepages got pages at %llu~%llu\n", offset, len);
doutc(cl, "got pages at %llu~%llu\n", offset, len);
if (IS_ENCRYPTED(inode) &&
((offset | len) & ~CEPH_FSCRYPT_BLOCK_MASK))
pr_warn("%s: bad encrypted write offset=%lld len=%llu\n",
__func__, offset, len);
pr_warn_client(cl,
"bad encrypted write offset=%lld len=%llu\n",
offset, len);
osd_req_op_extent_osd_data_pages(req, op_idx, data_pages, len,
0, from_pool, false);
@ -1345,14 +1363,14 @@ new_request:
done = true;
release_folios:
dout("folio_batch release on %d folios (%p)\n", (int)fbatch.nr,
fbatch.nr ? fbatch.folios[0] : NULL);
doutc(cl, "folio_batch release on %d folios (%p)\n",
(int)fbatch.nr, fbatch.nr ? fbatch.folios[0] : NULL);
folio_batch_release(&fbatch);
}
if (should_loop && !done) {
/* more to do; loop back to beginning of file */
dout("writepages looping back to beginning of file\n");
doutc(cl, "looping back to beginning of file\n");
end = start_index - 1; /* OK even when start_index == 0 */
/* to write dirty pages associated with next snapc,
@ -1390,7 +1408,8 @@ release_folios:
out:
ceph_osdc_put_request(req);
ceph_put_snap_context(last_snapc);
dout("writepages dend - startone, rc = %d\n", rc);
doutc(cl, "%llx.%llx dend - startone, rc = %d\n", ceph_vinop(inode),
rc);
return rc;
}
@ -1424,11 +1443,12 @@ static struct ceph_snap_context *
ceph_find_incompatible(struct page *page)
{
struct inode *inode = page->mapping->host;
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
if (ceph_inode_is_shutdown(inode)) {
dout(" page %p %llx:%llx is shutdown\n", page,
ceph_vinop(inode));
doutc(cl, " %llx.%llx page %p is shutdown\n",
ceph_vinop(inode), page);
return ERR_PTR(-ESTALE);
}
@ -1449,13 +1469,15 @@ ceph_find_incompatible(struct page *page)
if (snapc->seq > oldest->seq) {
/* not writeable -- return it for the caller to deal with */
ceph_put_snap_context(oldest);
dout(" page %p snapc %p not current or oldest\n", page, snapc);
doutc(cl, " %llx.%llx page %p snapc %p not current or oldest\n",
ceph_vinop(inode), page, snapc);
return ceph_get_snap_context(snapc);
}
ceph_put_snap_context(oldest);
/* yay, writeable, do it now (without dropping page lock) */
dout(" page %p snapc %p not current, but oldest\n", page, snapc);
doutc(cl, " %llx.%llx page %p snapc %p not current, but oldest\n",
ceph_vinop(inode), page, snapc);
if (clear_page_dirty_for_io(page)) {
int r = writepage_nounlock(page, NULL);
if (r < 0)
@ -1524,10 +1546,11 @@ static int ceph_write_end(struct file *file, struct address_space *mapping,
{
struct folio *folio = page_folio(subpage);
struct inode *inode = file_inode(file);
struct ceph_client *cl = ceph_inode_to_client(inode);
bool check_cap = false;
dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file,
inode, folio, (int)pos, (int)copied, (int)len);
doutc(cl, "%llx.%llx file %p folio %p %d~%d (%d)\n", ceph_vinop(inode),
file, folio, (int)pos, (int)copied, (int)len);
if (!folio_test_uptodate(folio)) {
/* just return that nothing was copied on a short copy */
@ -1587,6 +1610,7 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_file_info *fi = vma->vm_file->private_data;
loff_t off = (loff_t)vmf->pgoff << PAGE_SHIFT;
int want, got, err;
@ -1598,8 +1622,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
ceph_block_sigs(&oldset);
dout("filemap_fault %p %llx.%llx %llu trying to get caps\n",
inode, ceph_vinop(inode), off);
doutc(cl, "%llx.%llx %llu trying to get caps\n",
ceph_vinop(inode), off);
if (fi->fmode & CEPH_FILE_MODE_LAZY)
want = CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO;
else
@ -1610,8 +1634,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
if (err < 0)
goto out_restore;
dout("filemap_fault %p %llu got cap refs on %s\n",
inode, off, ceph_cap_string(got));
doutc(cl, "%llx.%llx %llu got cap refs on %s\n", ceph_vinop(inode),
off, ceph_cap_string(got));
if ((got & (CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO)) ||
!ceph_has_inline_data(ci)) {
@ -1619,8 +1643,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
ceph_add_rw_context(fi, &rw_ctx);
ret = filemap_fault(vmf);
ceph_del_rw_context(fi, &rw_ctx);
dout("filemap_fault %p %llu drop cap refs %s ret %x\n",
inode, off, ceph_cap_string(got), ret);
doutc(cl, "%llx.%llx %llu drop cap refs %s ret %x\n",
ceph_vinop(inode), off, ceph_cap_string(got), ret);
} else
err = -EAGAIN;
@ -1661,8 +1685,8 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
ret = VM_FAULT_MAJOR | VM_FAULT_LOCKED;
out_inline:
filemap_invalidate_unlock_shared(mapping);
dout("filemap_fault %p %llu read inline data ret %x\n",
inode, off, ret);
doutc(cl, "%llx.%llx %llu read inline data ret %x\n",
ceph_vinop(inode), off, ret);
}
out_restore:
ceph_restore_sigs(&oldset);
@ -1676,6 +1700,7 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct inode *inode = file_inode(vma->vm_file);
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_file_info *fi = vma->vm_file->private_data;
struct ceph_cap_flush *prealloc_cf;
@ -1702,8 +1727,8 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
else
len = offset_in_thp(page, size);
dout("page_mkwrite %p %llx.%llx %llu~%zd getting caps i_size %llu\n",
inode, ceph_vinop(inode), off, len, size);
doutc(cl, "%llx.%llx %llu~%zd getting caps i_size %llu\n",
ceph_vinop(inode), off, len, size);
if (fi->fmode & CEPH_FILE_MODE_LAZY)
want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
else
@ -1714,8 +1739,8 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
if (err < 0)
goto out_free;
dout("page_mkwrite %p %llu~%zd got cap refs on %s\n",
inode, off, len, ceph_cap_string(got));
doutc(cl, "%llx.%llx %llu~%zd got cap refs on %s\n", ceph_vinop(inode),
off, len, ceph_cap_string(got));
/* Update time before taking page lock */
file_update_time(vma->vm_file);
@ -1763,8 +1788,8 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
__mark_inode_dirty(inode, dirty);
}
dout("page_mkwrite %p %llu~%zd dropping cap refs on %s ret %x\n",
inode, off, len, ceph_cap_string(got), ret);
doutc(cl, "%llx.%llx %llu~%zd dropping cap refs on %s ret %x\n",
ceph_vinop(inode), off, len, ceph_cap_string(got), ret);
ceph_put_cap_refs_async(ci, got);
out_free:
ceph_restore_sigs(&oldset);
@ -1778,6 +1803,7 @@ out_free:
void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
char *data, size_t len)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
struct address_space *mapping = inode->i_mapping;
struct page *page;
@ -1798,8 +1824,8 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
}
}
dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
inode, ceph_vinop(inode), len, locked_page);
doutc(cl, "%p %llx.%llx len %zu locked_page %p\n", inode,
ceph_vinop(inode), len, locked_page);
if (len > 0) {
void *kaddr = kmap_atomic(page);
@ -1823,7 +1849,8 @@ int ceph_uninline_data(struct file *file)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
struct ceph_osd_request *req = NULL;
struct ceph_cap_flush *prealloc_cf = NULL;
struct folio *folio = NULL;
@ -1836,8 +1863,8 @@ int ceph_uninline_data(struct file *file)
inline_version = ci->i_inline_version;
spin_unlock(&ci->i_ceph_lock);
dout("uninline_data %p %llx.%llx inline_version %llu\n",
inode, ceph_vinop(inode), inline_version);
doutc(cl, "%llx.%llx inline_version %llu\n", ceph_vinop(inode),
inline_version);
if (ceph_inode_is_shutdown(inode)) {
err = -EIO;
@ -1949,8 +1976,8 @@ out_unlock:
}
out:
ceph_free_cap_flush(prealloc_cf);
dout("uninline_data %p %llx.%llx inline_version %llu = %d\n",
inode, ceph_vinop(inode), inline_version, err);
doutc(cl, "%llx.%llx inline_version %llu = %d\n",
ceph_vinop(inode), inline_version, err);
return err;
}
@ -1977,8 +2004,9 @@ enum {
static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
s64 pool, struct ceph_string *pool_ns)
{
struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->netfs.inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(&ci->netfs.inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_client *cl = fsc->client;
struct ceph_osd_request *rd_req = NULL, *wr_req = NULL;
struct rb_node **p, *parent;
struct ceph_pool_perm *perm;
@ -2013,10 +2041,10 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci,
goto out;
if (pool_ns)
dout("__ceph_pool_perm_get pool %lld ns %.*s no perm cached\n",
pool, (int)pool_ns->len, pool_ns->str);
doutc(cl, "pool %lld ns %.*s no perm cached\n", pool,
(int)pool_ns->len, pool_ns->str);
else
dout("__ceph_pool_perm_get pool %lld no perm cached\n", pool);
doutc(cl, "pool %lld no perm cached\n", pool);
down_write(&mdsc->pool_perm_rwsem);
p = &mdsc->pool_perm_tree.rb_node;
@ -2141,15 +2169,16 @@ out:
if (!err)
err = have;
if (pool_ns)
dout("__ceph_pool_perm_get pool %lld ns %.*s result = %d\n",
pool, (int)pool_ns->len, pool_ns->str, err);
doutc(cl, "pool %lld ns %.*s result = %d\n", pool,
(int)pool_ns->len, pool_ns->str, err);
else
dout("__ceph_pool_perm_get pool %lld result = %d\n", pool, err);
doutc(cl, "pool %lld result = %d\n", pool, err);
return err;
}
int ceph_pool_perm_check(struct inode *inode, int need)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_string *pool_ns;
s64 pool;
@ -2168,7 +2197,7 @@ int ceph_pool_perm_check(struct inode *inode, int need)
return 0;
}
if (ceph_test_mount_opt(ceph_inode_to_client(inode),
if (ceph_test_mount_opt(ceph_inode_to_fs_client(inode),
NOPOOLPERM))
return 0;
@ -2179,13 +2208,11 @@ int ceph_pool_perm_check(struct inode *inode, int need)
check:
if (flags & CEPH_I_POOL_PERM) {
if ((need & CEPH_CAP_FILE_RD) && !(flags & CEPH_I_POOL_RD)) {
dout("ceph_pool_perm_check pool %lld no read perm\n",
pool);
doutc(cl, "pool %lld no read perm\n", pool);
return -EPERM;
}
if ((need & CEPH_CAP_FILE_WR) && !(flags & CEPH_I_POOL_WR)) {
dout("ceph_pool_perm_check pool %lld no write perm\n",
pool);
doutc(cl, "pool %lld no write perm\n", pool);
return -EPERM;
}
return 0;

@ -15,7 +15,7 @@
void ceph_fscache_register_inode_cookie(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
/* No caching for filesystem? */
if (!fsc->fscache)

(File diff suppressed because it is too large.)

@ -113,7 +113,7 @@ static int ceph_crypt_set_context(struct inode *inode, const void *ctx,
cia.fscrypt_auth = cfa;
ret = __ceph_setattr(inode, &attr, &cia);
ret = __ceph_setattr(&nop_mnt_idmap, inode, &attr, &cia);
if (ret == 0)
inode_set_flags(inode, S_ENCRYPTED, S_ENCRYPTED);
kfree(cia.fscrypt_auth);
@ -129,7 +129,7 @@ static bool ceph_crypt_empty_dir(struct inode *inode)
static const union fscrypt_policy *ceph_get_dummy_policy(struct super_block *sb)
{
return ceph_sb_to_client(sb)->fsc_dummy_enc_policy.policy;
return ceph_sb_to_fs_client(sb)->fsc_dummy_enc_policy.policy;
}
static struct fscrypt_operations ceph_fscrypt_ops = {
@ -212,6 +212,7 @@ void ceph_fscrypt_as_ctx_to_req(struct ceph_mds_request *req,
static struct inode *parse_longname(const struct inode *parent,
const char *name, int *name_len)
{
struct ceph_client *cl = ceph_inode_to_client(parent);
struct inode *dir = NULL;
struct ceph_vino vino = { .snap = CEPH_NOSNAP };
char *inode_number;
@ -223,12 +224,12 @@ static struct inode *parse_longname(const struct inode *parent,
name++;
name_end = strrchr(name, '_');
if (!name_end) {
dout("Failed to parse long snapshot name: %s\n", name);
doutc(cl, "failed to parse long snapshot name: %s\n", name);
return ERR_PTR(-EIO);
}
*name_len = (name_end - name);
if (*name_len <= 0) {
pr_err("Failed to parse long snapshot name\n");
pr_err_client(cl, "failed to parse long snapshot name\n");
return ERR_PTR(-EIO);
}
@ -240,7 +241,7 @@ static struct inode *parse_longname(const struct inode *parent,
return ERR_PTR(-ENOMEM);
ret = kstrtou64(inode_number, 10, &vino.ino);
if (ret) {
dout("Failed to parse inode number: %s\n", name);
doutc(cl, "failed to parse inode number: %s\n", name);
dir = ERR_PTR(ret);
goto out;
}
@ -251,7 +252,7 @@ static struct inode *parse_longname(const struct inode *parent,
/* This can happen if we're not mounting cephfs on the root */
dir = ceph_get_inode(parent->i_sb, vino, NULL);
if (IS_ERR(dir))
dout("Can't find inode %s (%s)\n", inode_number, name);
doutc(cl, "can't find inode %s (%s)\n", inode_number, name);
}
out:
@ -262,6 +263,7 @@ out:
int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name,
char *buf)
{
struct ceph_client *cl = ceph_inode_to_client(parent);
struct inode *dir = parent;
struct qstr iname;
u32 len;
@ -330,7 +332,7 @@ int ceph_encode_encrypted_dname(struct inode *parent, struct qstr *d_name,
/* base64 encode the encrypted name */
elen = ceph_base64_encode(cryptbuf, len, buf);
dout("base64-encoded ciphertext name = %.*s\n", elen, buf);
doutc(cl, "base64-encoded ciphertext name = %.*s\n", elen, buf);
/* To understand the 240 limit, see CEPH_NOHASH_NAME_MAX comments */
WARN_ON(elen > 240);
@ -505,7 +507,10 @@ int ceph_fscrypt_decrypt_block_inplace(const struct inode *inode,
struct page *page, unsigned int len,
unsigned int offs, u64 lblk_num)
{
dout("%s: len %u offs %u blk %llu\n", __func__, len, offs, lblk_num);
struct ceph_client *cl = ceph_inode_to_client(inode);
doutc(cl, "%p %llx.%llx len %u offs %u blk %llu\n", inode,
ceph_vinop(inode), len, offs, lblk_num);
return fscrypt_decrypt_block_inplace(inode, page, len, offs, lblk_num);
}
@ -514,7 +519,10 @@ int ceph_fscrypt_encrypt_block_inplace(const struct inode *inode,
unsigned int offs, u64 lblk_num,
gfp_t gfp_flags)
{
dout("%s: len %u offs %u blk %llu\n", __func__, len, offs, lblk_num);
struct ceph_client *cl = ceph_inode_to_client(inode);
doutc(cl, "%p %llx.%llx len %u offs %u blk %llu\n", inode,
ceph_vinop(inode), len, offs, lblk_num);
return fscrypt_encrypt_block_inplace(inode, page, len, offs, lblk_num,
gfp_flags);
}
@ -583,6 +591,7 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
u64 off, struct ceph_sparse_extent *map,
u32 ext_cnt)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
int i, ret = 0;
struct ceph_inode_info *ci = ceph_inode(inode);
u64 objno, objoff;
@ -590,7 +599,8 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
/* Nothing to do for empty array */
if (ext_cnt == 0) {
dout("%s: empty array, ret 0\n", __func__);
doutc(cl, "%p %llx.%llx empty array, ret 0\n", inode,
ceph_vinop(inode));
return 0;
}
@ -604,14 +614,17 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
int fret;
if ((ext->off | ext->len) & ~CEPH_FSCRYPT_BLOCK_MASK) {
pr_warn("%s: bad encrypted sparse extent idx %d off %llx len %llx\n",
__func__, i, ext->off, ext->len);
pr_warn_client(cl,
"%p %llx.%llx bad encrypted sparse extent "
"idx %d off %llx len %llx\n",
inode, ceph_vinop(inode), i, ext->off,
ext->len);
return -EIO;
}
fret = ceph_fscrypt_decrypt_pages(inode, &page[pgidx],
off + pgsoff, ext->len);
dout("%s: [%d] 0x%llx~0x%llx fret %d\n", __func__, i,
ext->off, ext->len, fret);
doutc(cl, "%p %llx.%llx [%d] 0x%llx~0x%llx fret %d\n", inode,
ceph_vinop(inode), i, ext->off, ext->len, fret);
if (fret < 0) {
if (ret == 0)
ret = fret;
@ -619,7 +632,7 @@ int ceph_fscrypt_decrypt_extents(struct inode *inode, struct page **page,
}
ret = pgsoff + fret;
}
dout("%s: ret %d\n", __func__, ret);
doutc(cl, "ret %d\n", ret);
return ret;
}

@ -81,7 +81,7 @@ static int mdsc_show(struct seq_file *s, void *p)
if (req->r_inode) {
seq_printf(s, " #%llx", ceph_ino(req->r_inode));
} else if (req->r_dentry) {
path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
&pathbase, 0);
if (IS_ERR(path))
path = NULL;
@ -100,7 +100,7 @@ static int mdsc_show(struct seq_file *s, void *p)
}
if (req->r_old_dentry) {
path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen,
path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &pathlen,
&pathbase, 0);
if (IS_ERR(path))
path = NULL;
@ -398,7 +398,7 @@ DEFINE_SIMPLE_ATTRIBUTE(congestion_kb_fops, congestion_kb_get,
void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
{
dout("ceph_fs_debugfs_cleanup\n");
doutc(fsc->client, "begin\n");
debugfs_remove(fsc->debugfs_bdi);
debugfs_remove(fsc->debugfs_congestion_kb);
debugfs_remove(fsc->debugfs_mdsmap);
@ -407,13 +407,14 @@ void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
debugfs_remove(fsc->debugfs_status);
debugfs_remove(fsc->debugfs_mdsc);
debugfs_remove_recursive(fsc->debugfs_metrics_dir);
doutc(fsc->client, "done\n");
}
void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
{
char name[100];
dout("ceph_fs_debugfs_init\n");
doutc(fsc->client, "begin\n");
fsc->debugfs_congestion_kb =
debugfs_create_file("writeback_congestion_kb",
0600,
@ -469,6 +470,7 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
&metrics_size_fops);
debugfs_create_file("caps", 0400, fsc->debugfs_metrics_dir, fsc,
&metrics_caps_fops);
doutc(fsc->client, "done\n");
}

@ -109,7 +109,9 @@ static int fpos_cmp(loff_t l, loff_t r)
* regardless of what dir changes take place on the
* server.
*/
static int note_last_dentry(struct ceph_dir_file_info *dfi, const char *name,
static int note_last_dentry(struct ceph_fs_client *fsc,
struct ceph_dir_file_info *dfi,
const char *name,
int len, unsigned next_offset)
{
char *buf = kmalloc(len+1, GFP_KERNEL);
@ -120,7 +122,7 @@ static int note_last_dentry(struct ceph_dir_file_info *dfi, const char *name,
memcpy(dfi->last_name, name, len);
dfi->last_name[len] = 0;
dfi->next_offset = next_offset;
dout("note_last_dentry '%s'\n", dfi->last_name);
doutc(fsc->client, "'%s'\n", dfi->last_name);
return 0;
}
@ -130,6 +132,7 @@ __dcache_find_get_entry(struct dentry *parent, u64 idx,
struct ceph_readdir_cache_control *cache_ctl)
{
struct inode *dir = d_inode(parent);
struct ceph_client *cl = ceph_inode_to_client(dir);
struct dentry *dentry;
unsigned idx_mask = (PAGE_SIZE / sizeof(struct dentry *)) - 1;
loff_t ptr_pos = idx * sizeof(struct dentry *);
@ -142,7 +145,7 @@ __dcache_find_get_entry(struct dentry *parent, u64 idx,
ceph_readdir_cache_release(cache_ctl);
cache_ctl->page = find_lock_page(&dir->i_data, ptr_pgoff);
if (!cache_ctl->page) {
dout(" page %lu not found\n", ptr_pgoff);
doutc(cl, " page %lu not found\n", ptr_pgoff);
return ERR_PTR(-EAGAIN);
}
/* reading/filling the cache are serialized by
@ -185,13 +188,16 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
struct ceph_dir_file_info *dfi = file->private_data;
struct dentry *parent = file->f_path.dentry;
struct inode *dir = d_inode(parent);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(dir);
struct ceph_client *cl = ceph_inode_to_client(dir);
struct dentry *dentry, *last = NULL;
struct ceph_dentry_info *di;
struct ceph_readdir_cache_control cache_ctl = {};
u64 idx = 0;
int err = 0;
dout("__dcache_readdir %p v%u at %llx\n", dir, (unsigned)shared_gen, ctx->pos);
doutc(cl, "%p %llx.%llx v%u at %llx\n", dir, ceph_vinop(dir),
(unsigned)shared_gen, ctx->pos);
/* search start position */
if (ctx->pos > 2) {
@ -221,7 +227,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
dput(dentry);
}
dout("__dcache_readdir %p cache idx %llu\n", dir, idx);
doutc(cl, "%p %llx.%llx cache idx %llu\n", dir,
ceph_vinop(dir), idx);
}
@ -257,8 +264,8 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
spin_unlock(&dentry->d_lock);
if (emit_dentry) {
dout(" %llx dentry %p %pd %p\n", di->offset,
dentry, dentry, d_inode(dentry));
doutc(cl, " %llx dentry %p %pd %p\n", di->offset,
dentry, dentry, d_inode(dentry));
ctx->pos = di->offset;
if (!dir_emit(ctx, dentry->d_name.name,
dentry->d_name.len, ceph_present_inode(d_inode(dentry)),
@ -281,7 +288,8 @@ out:
if (last) {
int ret;
di = ceph_dentry(last);
ret = note_last_dentry(dfi, last->d_name.name, last->d_name.len,
ret = note_last_dentry(fsc, dfi, last->d_name.name,
last->d_name.len,
fpos_off(di->offset) + 1);
if (ret < 0)
err = ret;
@ -310,20 +318,23 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
struct ceph_dir_file_info *dfi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_client *cl = fsc->client;
int i;
int err;
unsigned frag = -1;
struct ceph_mds_reply_info_parsed *rinfo;
dout("readdir %p file %p pos %llx\n", inode, file, ctx->pos);
doutc(cl, "%p %llx.%llx file %p pos %llx\n", inode,
ceph_vinop(inode), file, ctx->pos);
if (dfi->file_info.flags & CEPH_F_ATEND)
return 0;
/* always start with . and .. */
if (ctx->pos == 0) {
dout("readdir off 0 -> '.'\n");
doutc(cl, "%p %llx.%llx off 0 -> '.'\n", inode,
ceph_vinop(inode));
if (!dir_emit(ctx, ".", 1, ceph_present_inode(inode),
inode->i_mode >> 12))
return 0;
@ -337,7 +348,8 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
ino = ceph_present_inode(dentry->d_parent->d_inode);
spin_unlock(&dentry->d_lock);
dout("readdir off 1 -> '..'\n");
doutc(cl, "%p %llx.%llx off 1 -> '..'\n", inode,
ceph_vinop(inode));
if (!dir_emit(ctx, "..", 2, ino, inode->i_mode >> 12))
return 0;
ctx->pos = 2;
@ -391,8 +403,8 @@ more:
frag = fpos_frag(ctx->pos);
}
dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
ceph_vinop(inode), frag, dfi->last_name);
doutc(cl, "fetching %p %llx.%llx frag %x offset '%s'\n",
inode, ceph_vinop(inode), frag, dfi->last_name);
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
@ -446,12 +458,12 @@ more:
ceph_mdsc_put_request(req);
return err;
}
dout("readdir got and parsed readdir result=%d on "
"frag %x, end=%d, complete=%d, hash_order=%d\n",
err, frag,
(int)req->r_reply_info.dir_end,
(int)req->r_reply_info.dir_complete,
(int)req->r_reply_info.hash_order);
doutc(cl, "%p %llx.%llx got and parsed readdir result=%d"
"on frag %x, end=%d, complete=%d, hash_order=%d\n",
inode, ceph_vinop(inode), err, frag,
(int)req->r_reply_info.dir_end,
(int)req->r_reply_info.dir_complete,
(int)req->r_reply_info.hash_order);
rinfo = &req->r_reply_info;
if (le32_to_cpu(rinfo->dir_dir->frag) != frag) {
@ -481,7 +493,8 @@ more:
dfi->dir_ordered_count = req->r_dir_ordered_cnt;
}
} else {
dout("readdir !did_prepopulate\n");
doutc(cl, "%p %llx.%llx !did_prepopulate\n", inode,
ceph_vinop(inode));
/* disable readdir cache */
dfi->readdir_cache_idx = -1;
/* preclude from marking dir complete */
@ -494,8 +507,8 @@ more:
rinfo->dir_entries + (rinfo->dir_nr-1);
unsigned next_offset = req->r_reply_info.dir_end ?
2 : (fpos_off(rde->offset) + 1);
err = note_last_dentry(dfi, rde->name, rde->name_len,
next_offset);
err = note_last_dentry(fsc, dfi, rde->name,
rde->name_len, next_offset);
if (err) {
ceph_mdsc_put_request(dfi->last_readdir);
dfi->last_readdir = NULL;
@ -508,9 +521,9 @@ more:
}
rinfo = &dfi->last_readdir->r_reply_info;
dout("readdir frag %x num %d pos %llx chunk first %llx\n",
dfi->frag, rinfo->dir_nr, ctx->pos,
rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
doutc(cl, "%p %llx.%llx frag %x num %d pos %llx chunk first %llx\n",
inode, ceph_vinop(inode), dfi->frag, rinfo->dir_nr, ctx->pos,
rinfo->dir_nr ? rinfo->dir_entries[0].offset : 0LL);
i = 0;
/* search start position */
@ -530,8 +543,9 @@ more:
struct ceph_mds_reply_dir_entry *rde = rinfo->dir_entries + i;
if (rde->offset < ctx->pos) {
pr_warn("%s: rde->offset 0x%llx ctx->pos 0x%llx\n",
__func__, rde->offset, ctx->pos);
pr_warn_client(cl,
"%p %llx.%llx rde->offset 0x%llx ctx->pos 0x%llx\n",
inode, ceph_vinop(inode), rde->offset, ctx->pos);
return -EIO;
}
@ -539,9 +553,9 @@ more:
return -EIO;
ctx->pos = rde->offset;
dout("readdir (%d/%d) -> %llx '%.*s' %p\n",
i, rinfo->dir_nr, ctx->pos,
rde->name_len, rde->name, &rde->inode.in);
doutc(cl, "%p %llx.%llx (%d/%d) -> %llx '%.*s' %p\n", inode,
ceph_vinop(inode), i, rinfo->dir_nr, ctx->pos,
rde->name_len, rde->name, &rde->inode.in);
if (!dir_emit(ctx, rde->name, rde->name_len,
ceph_present_ino(inode->i_sb, le64_to_cpu(rde->inode.in->ino)),
@ -552,7 +566,7 @@ more:
* doesn't have enough memory, etc. So for next readdir
* it will continue.
*/
dout("filldir stopping us...\n");
doutc(cl, "filldir stopping us...\n");
return 0;
}
@ -583,7 +597,8 @@ more:
kfree(dfi->last_name);
dfi->last_name = NULL;
}
dout("readdir next frag is %x\n", frag);
doutc(cl, "%p %llx.%llx next frag is %x\n", inode,
ceph_vinop(inode), frag);
goto more;
}
dfi->file_info.flags |= CEPH_F_ATEND;
@ -598,20 +613,23 @@ more:
spin_lock(&ci->i_ceph_lock);
if (dfi->dir_ordered_count ==
atomic64_read(&ci->i_ordered_count)) {
dout(" marking %p complete and ordered\n", inode);
doutc(cl, " marking %p %llx.%llx complete and ordered\n",
inode, ceph_vinop(inode));
/* use i_size to track number of entries in
* readdir cache */
BUG_ON(dfi->readdir_cache_idx < 0);
i_size_write(inode, dfi->readdir_cache_idx *
sizeof(struct dentry*));
} else {
dout(" marking %p complete\n", inode);
doutc(cl, " marking %llx.%llx complete\n",
ceph_vinop(inode));
}
__ceph_dir_set_complete(ci, dfi->dir_release_count,
dfi->dir_ordered_count);
spin_unlock(&ci->i_ceph_lock);
}
dout("readdir %p file %p done.\n", inode, file);
doutc(cl, "%p %llx.%llx file %p done.\n", inode, ceph_vinop(inode),
file);
return 0;
}
@ -657,6 +675,7 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
struct ceph_dir_file_info *dfi = file->private_data;
struct inode *inode = file->f_mapping->host;
struct ceph_client *cl = ceph_inode_to_client(inode);
loff_t retval;
inode_lock(inode);
@ -676,7 +695,8 @@ static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
if (offset >= 0) {
if (need_reset_readdir(dfi, offset)) {
dout("dir_llseek dropping %p content\n", file);
doutc(cl, "%p %llx.%llx dropping %p content\n",
inode, ceph_vinop(inode), file);
reset_readdir(dfi);
} else if (is_hash_order(offset) && offset > file->f_pos) {
/* for hash offset, we don't know if a forward seek
@ -703,8 +723,9 @@ out:
struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
struct dentry *dentry)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
struct inode *parent = d_inode(dentry->d_parent); /* we hold i_rwsem */
struct ceph_client *cl = ceph_inode_to_client(parent);
/* .snap dir? */
if (ceph_snap(parent) == CEPH_NOSNAP &&
@ -713,8 +734,9 @@ struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
struct inode *inode = ceph_get_snapdir(parent);
res = d_splice_alias(inode, dentry);
dout("ENOENT on snapdir %p '%pd', linking to snapdir %p. Spliced dentry %p\n",
dentry, dentry, inode, res);
doutc(cl, "ENOENT on snapdir %p '%pd', linking to "
"snapdir %p %llx.%llx. Spliced dentry %p\n",
dentry, dentry, inode, ceph_vinop(inode), res);
if (res)
dentry = res;
}
@ -735,12 +757,15 @@ struct dentry *ceph_handle_snapdir(struct ceph_mds_request *req,
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
struct dentry *dentry, int err)
{
struct ceph_client *cl = req->r_mdsc->fsc->client;
if (err == -ENOENT) {
/* no trace? */
err = 0;
if (!req->r_reply_info.head->is_dentry) {
dout("ENOENT and no trace, dentry %p inode %p\n",
dentry, d_inode(dentry));
doutc(cl,
"ENOENT and no trace, dentry %p inode %llx.%llx\n",
dentry, ceph_vinop(d_inode(dentry)));
if (d_really_is_positive(dentry)) {
d_drop(dentry);
err = -ENOENT;
@ -771,15 +796,16 @@ static bool is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_client *cl = fsc->client;
struct ceph_mds_request *req;
int op;
int mask;
int err;
dout("lookup %p dentry %p '%pd'\n",
dir, dentry, dentry);
doutc(cl, "%p %llx.%llx/'%pd' dentry %p\n", dir, ceph_vinop(dir),
dentry, dentry);
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
@ -802,7 +828,8 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
struct ceph_dentry_info *di = ceph_dentry(dentry);
spin_lock(&ci->i_ceph_lock);
dout(" dir %p flags are 0x%lx\n", dir, ci->i_ceph_flags);
doutc(cl, " dir %llx.%llx flags are 0x%lx\n",
ceph_vinop(dir), ci->i_ceph_flags);
if (strncmp(dentry->d_name.name,
fsc->mount_options->snapdir_name,
dentry->d_name.len) &&
@ -812,7 +839,8 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
__ceph_caps_issued_mask_metric(ci, CEPH_CAP_FILE_SHARED, 1)) {
__ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_RD);
spin_unlock(&ci->i_ceph_lock);
dout(" dir %p complete, -ENOENT\n", dir);
doutc(cl, " dir %llx.%llx complete, -ENOENT\n",
ceph_vinop(dir));
d_add(dentry, NULL);
di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
return NULL;
@ -850,7 +878,7 @@ static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
}
dentry = ceph_finish_lookup(req, dentry, err);
ceph_mdsc_put_request(req); /* will dput(dentry) */
dout("lookup result=%p\n", dentry);
doutc(cl, "result=%p\n", dentry);
return dentry;
}
@ -885,6 +913,7 @@ static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode, dev_t rdev)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct ceph_acl_sec_ctx as_ctx = {};
int err;
@ -901,8 +930,8 @@ static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
goto out;
}
dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
dir, dentry, mode, rdev);
doutc(cl, "%p %llx.%llx/'%pd' dentry %p mode 0%ho rdev %d\n",
dir, ceph_vinop(dir), dentry, dentry, mode, rdev);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@ -924,6 +953,7 @@ static int ceph_mknod(struct mnt_idmap *idmap, struct inode *dir,
req->r_parent = dir;
ihold(dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_mnt_idmap = mnt_idmap_get(idmap);
req->r_args.mknod.mode = cpu_to_le32(mode);
req->r_args.mknod.rdev = cpu_to_le32(rdev);
req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
@ -993,6 +1023,7 @@ static int ceph_symlink(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, const char *dest)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct ceph_acl_sec_ctx as_ctx = {};
umode_t mode = S_IFLNK | 0777;
@ -1010,7 +1041,8 @@ static int ceph_symlink(struct mnt_idmap *idmap, struct inode *dir,
goto out;
}
dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
doutc(cl, "%p %llx.%llx/'%pd' to '%s'\n", dir, ceph_vinop(dir), dentry,
dest);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@ -1040,6 +1072,7 @@ static int ceph_symlink(struct mnt_idmap *idmap, struct inode *dir,
}
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
req->r_mnt_idmap = mnt_idmap_get(idmap);
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
@ -1064,6 +1097,7 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
struct dentry *dentry, umode_t mode)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct ceph_acl_sec_ctx as_ctx = {};
int err;
@ -1076,10 +1110,11 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
if (ceph_snap(dir) == CEPH_SNAPDIR) {
/* mkdir .snap/foo is a MKSNAP */
op = CEPH_MDS_OP_MKSNAP;
dout("mksnap dir %p snap '%pd' dn %p\n", dir,
dentry, dentry);
doutc(cl, "mksnap %llx.%llx/'%pd' dentry %p\n",
ceph_vinop(dir), dentry, dentry);
} else if (ceph_snap(dir) == CEPH_NOSNAP) {
dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
doutc(cl, "mkdir %llx.%llx/'%pd' dentry %p mode 0%ho\n",
ceph_vinop(dir), dentry, dentry, mode);
op = CEPH_MDS_OP_MKDIR;
} else {
err = -EROFS;
@ -1117,6 +1152,8 @@ static int ceph_mkdir(struct mnt_idmap *idmap, struct inode *dir,
req->r_parent = dir;
ihold(dir);
set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
if (op == CEPH_MDS_OP_MKDIR)
req->r_mnt_idmap = mnt_idmap_get(idmap);
req->r_args.mkdir.mode = cpu_to_le32(mode);
req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL |
CEPH_CAP_XATTR_EXCL;
@ -1144,6 +1181,7 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
int err;
@ -1161,8 +1199,8 @@ static int ceph_link(struct dentry *old_dentry, struct inode *dir,
if (err)
return err;
dout("link in dir %p %llx.%llx old_dentry %p:'%pd' dentry %p:'%pd'\n",
dir, ceph_vinop(dir), old_dentry, old_dentry, dentry, dentry);
doutc(cl, "%p %llx.%llx/'%pd' to '%pd'\n", dir, ceph_vinop(dir),
old_dentry, dentry);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
if (IS_ERR(req)) {
d_drop(dentry);
@ -1199,14 +1237,16 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
struct dentry *dentry = req->r_dentry;
struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
struct ceph_client *cl = fsc->client;
struct ceph_dentry_info *di = ceph_dentry(dentry);
int result = req->r_err ? req->r_err :
le32_to_cpu(req->r_reply_info.head->result);
if (!test_bit(CEPH_DENTRY_ASYNC_UNLINK_BIT, &di->flags))
pr_warn("%s dentry %p:%pd async unlink bit is not set\n",
__func__, dentry, dentry);
pr_warn_client(cl,
"dentry %p:%pd async unlink bit is not set\n",
dentry, dentry);
spin_lock(&fsc->async_unlink_conflict_lock);
hash_del_rcu(&di->hnode);
@ -1226,7 +1266,7 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
if (result) {
int pathlen = 0;
u64 base = 0;
char *path = ceph_mdsc_build_path(dentry, &pathlen,
char *path = ceph_mdsc_build_path(mdsc, dentry, &pathlen,
&base, 0);
/* mark error on parent + clear complete */
@ -1240,8 +1280,8 @@ static void ceph_async_unlink_cb(struct ceph_mds_client *mdsc,
/* mark inode itself for an error (since metadata is bogus) */
mapping_set_error(req->r_old_inode->i_mapping, result);
pr_warn("async unlink failure path=(%llx)%s result=%d!\n",
base, IS_ERR(path) ? "<<bad>>" : path, result);
pr_warn_client(cl, "failure path=(%llx)%s result=%d!\n",
base, IS_ERR(path) ? "<<bad>>" : path, result);
ceph_mdsc_free_path(path, pathlen);
}
out:
@ -1290,7 +1330,8 @@ static int get_caps_for_async_unlink(struct inode *dir, struct dentry *dentry)
*/
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct inode *inode = d_inode(dentry);
struct ceph_mds_request *req;
@ -1300,11 +1341,12 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
if (ceph_snap(dir) == CEPH_SNAPDIR) {
/* rmdir .snap/foo is RMSNAP */
dout("rmsnap dir %p '%pd' dn %p\n", dir, dentry, dentry);
doutc(cl, "rmsnap %llx.%llx/'%pd' dn\n", ceph_vinop(dir),
dentry);
op = CEPH_MDS_OP_RMSNAP;
} else if (ceph_snap(dir) == CEPH_NOSNAP) {
dout("unlink/rmdir dir %p dn %p inode %p\n",
dir, dentry, inode);
doutc(cl, "unlink/rmdir %llx.%llx/'%pd' inode %llx.%llx\n",
ceph_vinop(dir), dentry, ceph_vinop(inode));
op = d_is_dir(dentry) ?
CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
} else
@ -1327,9 +1369,9 @@ retry:
(req->r_dir_caps = get_caps_for_async_unlink(dir, dentry))) {
struct ceph_dentry_info *di = ceph_dentry(dentry);
dout("async unlink on %llu/%.*s caps=%s", ceph_ino(dir),
dentry->d_name.len, dentry->d_name.name,
ceph_cap_string(req->r_dir_caps));
doutc(cl, "async unlink on %llx.%llx/'%pd' caps=%s",
ceph_vinop(dir), dentry,
ceph_cap_string(req->r_dir_caps));
set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
req->r_callback = ceph_async_unlink_cb;
req->r_old_inode = d_inode(dentry);
@ -1384,6 +1426,7 @@ static int ceph_rename(struct mnt_idmap *idmap, struct inode *old_dir,
struct dentry *new_dentry, unsigned int flags)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(old_dir->i_sb);
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
int op = CEPH_MDS_OP_RENAME;
int err;
@ -1413,8 +1456,9 @@ static int ceph_rename(struct mnt_idmap *idmap, struct inode *old_dir,
if (err)
return err;
dout("rename dir %p dentry %p to dir %p dentry %p\n",
old_dir, old_dentry, new_dir, new_dentry);
doutc(cl, "%llx.%llx/'%pd' to %llx.%llx/'%pd'\n",
ceph_vinop(old_dir), old_dentry, ceph_vinop(new_dir),
new_dentry);
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
@ -1459,9 +1503,10 @@ static int ceph_rename(struct mnt_idmap *idmap, struct inode *old_dir,
void __ceph_dentry_lease_touch(struct ceph_dentry_info *di)
{
struct dentry *dn = di->dentry;
struct ceph_mds_client *mdsc;
struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc;
struct ceph_client *cl = mdsc->fsc->client;
dout("dentry_lease_touch %p %p '%pd'\n", di, dn, dn);
doutc(cl, "%p %p '%pd'\n", di, dn, dn);
di->flags |= CEPH_DENTRY_LEASE_LIST;
if (di->flags & CEPH_DENTRY_SHRINK_LIST) {
@ -1469,7 +1514,6 @@ void __ceph_dentry_lease_touch(struct ceph_dentry_info *di)
return;
}
mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_list_lock);
list_move_tail(&di->lease_list, &mdsc->dentry_leases);
spin_unlock(&mdsc->dentry_list_lock);
@ -1493,10 +1537,10 @@ static void __dentry_dir_lease_touch(struct ceph_mds_client* mdsc,
void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
{
struct dentry *dn = di->dentry;
struct ceph_mds_client *mdsc;
struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dn->d_sb)->mdsc;
struct ceph_client *cl = mdsc->fsc->client;
dout("dentry_dir_lease_touch %p %p '%pd' (offset 0x%llx)\n",
di, dn, dn, di->offset);
doutc(cl, "%p %p '%pd' (offset 0x%llx)\n", di, dn, dn, di->offset);
if (!list_empty(&di->lease_list)) {
if (di->flags & CEPH_DENTRY_LEASE_LIST) {
@ -1516,7 +1560,6 @@ void __ceph_dentry_dir_lease_touch(struct ceph_dentry_info *di)
return;
}
mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_list_lock);
__dentry_dir_lease_touch(mdsc, di),
spin_unlock(&mdsc->dentry_list_lock);
@ -1530,7 +1573,7 @@ static void __dentry_lease_unlist(struct ceph_dentry_info *di)
if (list_empty(&di->lease_list))
return;
mdsc = ceph_sb_to_client(di->dentry->d_sb)->mdsc;
mdsc = ceph_sb_to_fs_client(di->dentry->d_sb)->mdsc;
spin_lock(&mdsc->dentry_list_lock);
list_del_init(&di->lease_list);
spin_unlock(&mdsc->dentry_list_lock);
@ -1757,6 +1800,8 @@ static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags)
{
struct ceph_dentry_info *di;
struct ceph_mds_session *session = NULL;
struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dentry->d_sb)->mdsc;
struct ceph_client *cl = mdsc->fsc->client;
u32 seq = 0;
int valid = 0;
@ -1789,7 +1834,7 @@ static int dentry_lease_is_valid(struct dentry *dentry, unsigned int flags)
CEPH_MDS_LEASE_RENEW, seq);
ceph_put_mds_session(session);
}
dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
doutc(cl, "dentry %p = %d\n", dentry, valid);
return valid;
}
@ -1832,6 +1877,7 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
struct ceph_mds_client *mdsc)
{
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_client *cl = mdsc->fsc->client;
int valid;
int shared_gen;
@ -1853,8 +1899,9 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
valid = 0;
spin_unlock(&dentry->d_lock);
}
dout("dir_lease_is_valid dir %p v%u dentry %p = %d\n",
dir, (unsigned)atomic_read(&ci->i_shared_gen), dentry, valid);
doutc(cl, "dir %p %llx.%llx v%u dentry %p '%pd' = %d\n", dir,
ceph_vinop(dir), (unsigned)atomic_read(&ci->i_shared_gen),
dentry, dentry, valid);
return valid;
}
@ -1863,10 +1910,11 @@ static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry,
*/
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(dentry->d_sb)->mdsc;
struct ceph_client *cl = mdsc->fsc->client;
int valid = 0;
struct dentry *parent;
struct inode *dir, *inode;
struct ceph_mds_client *mdsc;
valid = fscrypt_d_revalidate(dentry, flags);
if (valid <= 0)
@ -1884,16 +1932,16 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
inode = d_inode(dentry);
}
dout("d_revalidate %p '%pd' inode %p offset 0x%llx nokey %d\n", dentry,
dentry, inode, ceph_dentry(dentry)->offset,
!!(dentry->d_flags & DCACHE_NOKEY_NAME));
doutc(cl, "%p '%pd' inode %p offset 0x%llx nokey %d\n",
dentry, dentry, inode, ceph_dentry(dentry)->offset,
!!(dentry->d_flags & DCACHE_NOKEY_NAME));
mdsc = ceph_sb_to_client(dir->i_sb)->mdsc;
mdsc = ceph_sb_to_fs_client(dir->i_sb)->mdsc;
/* always trust cached snapped dentries, snapdir dentry */
if (ceph_snap(dir) != CEPH_NOSNAP) {
dout("d_revalidate %p '%pd' inode %p is SNAPPED\n", dentry,
dentry, inode);
doutc(cl, "%p '%pd' inode %p is SNAPPED\n", dentry,
dentry, inode);
valid = 1;
} else if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
valid = 1;
@ -1948,14 +1996,14 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
break;
}
ceph_mdsc_put_request(req);
dout("d_revalidate %p lookup result=%d\n",
dentry, err);
doutc(cl, "%p '%pd', lookup result=%d\n", dentry,
dentry, err);
}
} else {
percpu_counter_inc(&mdsc->metric.d_lease_hit);
}
dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
doutc(cl, "%p '%pd' %s\n", dentry, dentry, valid ? "valid" : "invalid");
if (!valid)
ceph_dir_clear_complete(dir);
@ -1995,9 +2043,9 @@ static int ceph_d_delete(const struct dentry *dentry)
static void ceph_d_release(struct dentry *dentry)
{
struct ceph_dentry_info *di = ceph_dentry(dentry);
struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dentry->d_sb);
dout("d_release %p\n", dentry);
doutc(fsc->client, "dentry %p '%pd'\n", dentry, dentry);
atomic64_dec(&fsc->mdsc->metric.total_dentries);
@ -2018,10 +2066,12 @@ static void ceph_d_release(struct dentry *dentry)
*/
static void ceph_d_prune(struct dentry *dentry)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dentry->d_sb);
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *dir_ci;
struct ceph_dentry_info *di;
dout("ceph_d_prune %pd %p\n", dentry, dentry);
doutc(cl, "dentry %p '%pd'\n", dentry, dentry);
/* do we have a valid parent? */
if (IS_ROOT(dentry))
@ -2064,7 +2114,7 @@ static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
int left;
const int bufsize = 1024;
if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
if (!ceph_test_mount_opt(ceph_sb_to_fs_client(inode->i_sb), DIRSTAT))
return -EISDIR;
if (!dfi->dir_info) {

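Most of the churn in these hunks is a mechanical switch from the plain dout() debug macro to the client-aware doutc(), which takes a struct ceph_client pointer so every message can be prefixed with the cluster fsid and client global_id. The fragment below is only a hedged sketch of that pattern, assuming direct access to client->fsid and client->monc.auth->global_id; the doutc_sketch() macro and example function are hypothetical and are not the definition added to include/linux/ceph/ceph_debug.h by this series.

/*
 * Hedged sketch only: a client-aware debug macro in the spirit of the
 * new doutc().  The real macro lives in include/linux/ceph/ceph_debug.h;
 * the field accesses below assume the client's auth handshake has
 * already populated monc.auth.
 */
#define doutc_sketch(client, fmt, ...)					\
	pr_debug("[%pU %llu] %s: " fmt, &(client)->fsid,		\
		 (client)->monc.auth->global_id, __func__, ##__VA_ARGS__)

/* Call-site shape matching the converted hunks: */
static void doutc_sketch_example(struct inode *inode)
{
	struct ceph_client *cl = ceph_inode_to_client(inode);

	doutc_sketch(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
}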

@ -36,6 +36,7 @@ struct ceph_nfs_snapfh {
static int ceph_encode_snapfh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
static const int snap_handle_length =
sizeof(struct ceph_nfs_snapfh) >> 2;
struct ceph_nfs_snapfh *sfh = (void *)rawfh;
@ -79,13 +80,14 @@ static int ceph_encode_snapfh(struct inode *inode, u32 *rawfh, int *max_len,
*max_len = snap_handle_length;
ret = FILEID_BTRFS_WITH_PARENT;
out:
dout("encode_snapfh %llx.%llx ret=%d\n", ceph_vinop(inode), ret);
doutc(cl, "%p %llx.%llx ret=%d\n", inode, ceph_vinop(inode), ret);
return ret;
}
static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
struct inode *parent_inode)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
static const int handle_length =
sizeof(struct ceph_nfs_fh) >> 2;
static const int connected_handle_length =
@ -105,15 +107,15 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
if (parent_inode) {
struct ceph_nfs_confh *cfh = (void *)rawfh;
dout("encode_fh %llx with parent %llx\n",
ceph_ino(inode), ceph_ino(parent_inode));
doutc(cl, "%p %llx.%llx with parent %p %llx.%llx\n", inode,
ceph_vinop(inode), parent_inode, ceph_vinop(parent_inode));
cfh->ino = ceph_ino(inode);
cfh->parent_ino = ceph_ino(parent_inode);
*max_len = connected_handle_length;
type = FILEID_INO32_GEN_PARENT;
} else {
struct ceph_nfs_fh *fh = (void *)rawfh;
dout("encode_fh %llx\n", ceph_ino(inode));
doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
fh->ino = ceph_ino(inode);
*max_len = handle_length;
type = FILEID_INO32_GEN;
@ -123,7 +125,7 @@ static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
{
struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
struct inode *inode;
struct ceph_vino vino;
int err;
@ -205,7 +207,8 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
struct ceph_nfs_snapfh *sfh,
bool want_parent)
{
struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
struct inode *inode;
struct ceph_vino vino;
@ -278,11 +281,10 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
ceph_mdsc_put_request(req);
if (want_parent) {
dout("snapfh_to_parent %llx.%llx\n err=%d\n",
vino.ino, vino.snap, err);
doutc(cl, "%llx.%llx\n err=%d\n", vino.ino, vino.snap, err);
} else {
dout("snapfh_to_dentry %llx.%llx parent %llx hash %x err=%d",
vino.ino, vino.snap, sfh->parent_ino, sfh->hash, err);
doutc(cl, "%llx.%llx parent %llx hash %x err=%d", vino.ino,
vino.snap, sfh->parent_ino, sfh->hash, err);
}
if (IS_ERR(inode))
return ERR_CAST(inode);
@ -297,6 +299,7 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb,
struct fid *fid,
int fh_len, int fh_type)
{
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
struct ceph_nfs_fh *fh = (void *)fid->raw;
if (fh_type == FILEID_BTRFS_WITH_PARENT) {
@ -310,14 +313,14 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb,
if (fh_len < sizeof(*fh) / 4)
return NULL;
dout("fh_to_dentry %llx\n", fh->ino);
doutc(fsc->client, "%llx\n", fh->ino);
return __fh_to_dentry(sb, fh->ino);
}
static struct dentry *__get_parent(struct super_block *sb,
struct dentry *child, u64 ino)
{
struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc;
struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(sb)->mdsc;
struct ceph_mds_request *req;
struct inode *inode;
int mask;
@ -363,6 +366,7 @@ static struct dentry *__get_parent(struct super_block *sb,
static struct dentry *ceph_get_parent(struct dentry *child)
{
struct inode *inode = d_inode(child);
struct ceph_client *cl = ceph_inode_to_client(inode);
struct dentry *dn;
if (ceph_snap(inode) != CEPH_NOSNAP) {
@ -402,8 +406,8 @@ static struct dentry *ceph_get_parent(struct dentry *child)
dn = __get_parent(child->d_sb, child, 0);
}
out:
dout("get_parent %p ino %llx.%llx err=%ld\n",
child, ceph_vinop(inode), (long)PTR_ERR_OR_ZERO(dn));
doutc(cl, "child %p %p %llx.%llx err=%ld\n", child, inode,
ceph_vinop(inode), (long)PTR_ERR_OR_ZERO(dn));
return dn;
}
@ -414,6 +418,7 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
struct fid *fid,
int fh_len, int fh_type)
{
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
struct ceph_nfs_confh *cfh = (void *)fid->raw;
struct dentry *dentry;
@ -427,7 +432,7 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb,
if (fh_len < sizeof(*cfh) / 4)
return NULL;
dout("fh_to_parent %llx\n", cfh->parent_ino);
doutc(fsc->client, "%llx\n", cfh->parent_ino);
dentry = __get_parent(sb, NULL, cfh->ino);
if (unlikely(dentry == ERR_PTR(-ENOENT)))
dentry = __fh_to_dentry(sb, cfh->parent_ino);
@ -439,7 +444,7 @@ static int __get_snap_name(struct dentry *parent, char *name,
{
struct inode *inode = d_inode(child);
struct inode *dir = d_inode(parent);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_mds_request *req = NULL;
char *last_name = NULL;
unsigned next_offset = 2;
@ -526,8 +531,8 @@ out:
if (req)
ceph_mdsc_put_request(req);
kfree(last_name);
dout("get_snap_name %p ino %llx.%llx err=%d\n",
child, ceph_vinop(inode), err);
doutc(fsc->client, "child dentry %p %p %llx.%llx err=%d\n", child,
inode, ceph_vinop(inode), err);
return err;
}
@ -544,7 +549,7 @@ static int ceph_get_name(struct dentry *parent, char *name,
if (ceph_snap(inode) != CEPH_NOSNAP)
return __get_snap_name(parent, name, child);
mdsc = ceph_inode_to_client(inode)->mdsc;
mdsc = ceph_inode_to_fs_client(inode)->mdsc;
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPNAME,
USE_ANY_MDS);
if (IS_ERR(req))
@ -588,9 +593,9 @@ static int ceph_get_name(struct dentry *parent, char *name,
ceph_fname_free_buffer(dir, &oname);
}
out:
dout("get_name %p ino %llx.%llx err %d %s%s\n",
child, ceph_vinop(inode), err,
err ? "" : "name ", err ? "" : name);
doutc(mdsc->fsc->client, "child dentry %p %p %llx.%llx err %d %s%s\n",
child, inode, ceph_vinop(inode), err, err ? "" : "name ",
err ? "" : name);
ceph_mdsc_put_request(req);
return err;
}
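Another recurring pattern in this diff is the rename of the ceph_sb_to_client() and ceph_inode_to_client() accessors to ceph_sb_to_fs_client() and ceph_inode_to_fs_client(), which frees the *_to_client() names to hand back the lower-level libceph struct ceph_client that doutc() and the pr_*_client() macros want. The helpers below are a hedged sketch of that split, assuming the fs_client is stashed in sb->s_fs_info; the sketch_* names and bodies are illustrative, not the definitions in fs/ceph/super.h.

/*
 * Illustrative sketch of the accessor split implied by the rename:
 * *_to_fs_client() keeps returning the CephFS-level ceph_fs_client,
 * while *_to_client() returns the libceph ceph_client used for
 * cluster/client-tagged logging.
 */
static inline struct ceph_fs_client *sketch_sb_to_fs_client(struct super_block *sb)
{
	return sb->s_fs_info;		/* assumed location of the fs_client */
}

static inline struct ceph_client *sketch_inode_to_client(struct inode *inode)
{
	return sketch_sb_to_fs_client(inode->i_sb)->client;
}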


@ -19,8 +19,9 @@
#include "io.h"
#include "metric.h"
static __le32 ceph_flags_sys2wire(u32 flags)
static __le32 ceph_flags_sys2wire(struct ceph_mds_client *mdsc, u32 flags)
{
struct ceph_client *cl = mdsc->fsc->client;
u32 wire_flags = 0;
switch (flags & O_ACCMODE) {
@ -48,7 +49,7 @@ static __le32 ceph_flags_sys2wire(u32 flags)
#undef ceph_sys2wire
if (flags)
dout("unused open flags: %x\n", flags);
doutc(cl, "unused open flags: %x\n", flags);
return cpu_to_le32(wire_flags);
}
@ -189,7 +190,7 @@ prepare_open_request(struct super_block *sb, int flags, int create_mode)
if (IS_ERR(req))
goto out;
req->r_fmode = ceph_flags_to_mode(flags);
req->r_args.open.flags = ceph_flags_sys2wire(flags);
req->r_args.open.flags = ceph_flags_sys2wire(mdsc, flags);
req->r_args.open.mode = cpu_to_le32(create_mode);
out:
return req;
@ -200,12 +201,13 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mount_options *opt =
ceph_inode_to_client(&ci->netfs.inode)->mount_options;
ceph_inode_to_fs_client(&ci->netfs.inode)->mount_options;
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_file_info *fi;
int ret;
dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
inode->i_mode, isdir ? "dir" : "regular");
doutc(cl, "%p %llx.%llx %p 0%o (%s)\n", inode, ceph_vinop(inode),
file, inode->i_mode, isdir ? "dir" : "regular");
BUG_ON(inode->i_fop->release != ceph_release);
if (isdir) {
@ -234,7 +236,7 @@ static int ceph_init_file_info(struct inode *inode, struct file *file,
spin_lock_init(&fi->rw_contexts_lock);
INIT_LIST_HEAD(&fi->rw_contexts);
fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);
fi->filp_gen = READ_ONCE(ceph_inode_to_fs_client(inode)->filp_gen);
if ((file->f_mode & FMODE_WRITE) && ceph_has_inline_data(ci)) {
ret = ceph_uninline_data(file);
@ -259,6 +261,7 @@ error:
*/
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
int ret = 0;
switch (inode->i_mode & S_IFMT) {
@ -271,13 +274,13 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
break;
case S_IFLNK:
dout("init_file %p %p 0%o (symlink)\n", inode, file,
inode->i_mode);
doutc(cl, "%p %llx.%llx %p 0%o (symlink)\n", inode,
ceph_vinop(inode), file, inode->i_mode);
break;
default:
dout("init_file %p %p 0%o (special)\n", inode, file,
inode->i_mode);
doutc(cl, "%p %llx.%llx %p 0%o (special)\n", inode,
ceph_vinop(inode), file, inode->i_mode);
/*
* we need to drop the open ref now, since we don't
* have .release set to ceph_release.
@ -296,6 +299,7 @@ static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
int ceph_renew_caps(struct inode *inode, int fmode)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req;
int err, flags, wanted;
@ -307,8 +311,9 @@ int ceph_renew_caps(struct inode *inode, int fmode)
(!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
int issued = __ceph_caps_issued(ci, NULL);
spin_unlock(&ci->i_ceph_lock);
dout("renew caps %p want %s issued %s updating mds_wanted\n",
inode, ceph_cap_string(wanted), ceph_cap_string(issued));
doutc(cl, "%p %llx.%llx want %s issued %s updating mds_wanted\n",
inode, ceph_vinop(inode), ceph_cap_string(wanted),
ceph_cap_string(issued));
ceph_check_caps(ci, 0);
return 0;
}
@ -339,7 +344,8 @@ int ceph_renew_caps(struct inode *inode, int fmode)
err = ceph_mdsc_do_request(mdsc, NULL, req);
ceph_mdsc_put_request(req);
out:
dout("renew caps %p open result=%d\n", inode, err);
doutc(cl, "%p %llx.%llx open result=%d\n", inode, ceph_vinop(inode),
err);
return err < 0 ? err : 0;
}
@ -352,7 +358,8 @@ out:
int ceph_open(struct inode *inode, struct file *file)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
struct ceph_file_info *fi = file->private_data;
@ -360,7 +367,7 @@ int ceph_open(struct inode *inode, struct file *file)
int flags, fmode, wanted;
if (fi) {
dout("open file %p is already opened\n", file);
doutc(cl, "file %p is already opened\n", file);
return 0;
}
@ -374,8 +381,8 @@ int ceph_open(struct inode *inode, struct file *file)
return err;
}
dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
ceph_vinop(inode), file, flags, file->f_flags);
doutc(cl, "%p %llx.%llx file %p flags %d (%d)\n", inode,
ceph_vinop(inode), file, flags, file->f_flags);
fmode = ceph_flags_to_mode(flags);
wanted = ceph_caps_for_mode(fmode);
@ -399,9 +406,9 @@ int ceph_open(struct inode *inode, struct file *file)
int mds_wanted = __ceph_caps_mds_wanted(ci, true);
int issued = __ceph_caps_issued(ci, NULL);
dout("open %p fmode %d want %s issued %s using existing\n",
inode, fmode, ceph_cap_string(wanted),
ceph_cap_string(issued));
doutc(cl, "open %p fmode %d want %s issued %s using existing\n",
inode, fmode, ceph_cap_string(wanted),
ceph_cap_string(issued));
__ceph_touch_fmode(ci, mdsc, fmode);
spin_unlock(&ci->i_ceph_lock);
@ -421,7 +428,7 @@ int ceph_open(struct inode *inode, struct file *file)
spin_unlock(&ci->i_ceph_lock);
dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
doutc(cl, "open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
req = prepare_open_request(inode->i_sb, flags, 0);
if (IS_ERR(req)) {
err = PTR_ERR(req);
@ -435,7 +442,7 @@ int ceph_open(struct inode *inode, struct file *file)
if (!err)
err = ceph_init_file(inode, file, req->r_fmode);
ceph_mdsc_put_request(req);
dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
doutc(cl, "open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
return err;
}
@ -515,6 +522,7 @@ no_async:
static void restore_deleg_ino(struct inode *dir, u64 ino)
{
struct ceph_client *cl = ceph_inode_to_client(dir);
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_mds_session *s = NULL;
@ -525,7 +533,8 @@ static void restore_deleg_ino(struct inode *dir, u64 ino)
if (s) {
int err = ceph_restore_deleg_ino(s, ino);
if (err)
pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
pr_warn_client(cl,
"unable to restore delegated ino 0x%llx to session: %d\n",
ino, err);
ceph_put_mds_session(s);
}
@ -557,6 +566,7 @@ static void wake_async_create_waiters(struct inode *inode,
static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
struct ceph_client *cl = mdsc->fsc->client;
struct dentry *dentry = req->r_dentry;
struct inode *dinode = d_inode(dentry);
struct inode *tinode = req->r_target_inode;
@ -574,10 +584,11 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
if (result) {
int pathlen = 0;
u64 base = 0;
char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
char *path = ceph_mdsc_build_path(mdsc, req->r_dentry, &pathlen,
&base, 0);
pr_warn("async create failure path=(%llx)%s result=%d!\n",
pr_warn_client(cl,
"async create failure path=(%llx)%s result=%d!\n",
base, IS_ERR(path) ? "<<bad>>" : path, result);
ceph_mdsc_free_path(path, pathlen);
@ -596,14 +607,15 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
u64 ino = ceph_vino(tinode).ino;
if (req->r_deleg_ino != ino)
pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
__func__, req->r_err, req->r_deleg_ino, ino);
pr_warn_client(cl,
"inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
req->r_err, req->r_deleg_ino, ino);
mapping_set_error(tinode->i_mapping, result);
wake_async_create_waiters(tinode, req->r_session);
} else if (!result) {
pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
req->r_deleg_ino);
pr_warn_client(cl, "no req->r_target_inode for 0x%llx\n",
req->r_deleg_ino);
}
out:
ceph_mdsc_release_dir_caps(req);
@ -625,6 +637,7 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
struct timespec64 now;
struct ceph_string *pool_ns;
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_vino vino = { .ino = req->r_deleg_ino,
.snap = CEPH_NOSNAP };
@ -655,7 +668,9 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
in.truncate_seq = cpu_to_le32(1);
in.truncate_size = cpu_to_le64(-1ULL);
in.xattr_version = cpu_to_le64(1);
in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
in.uid = cpu_to_le32(from_kuid(&init_user_ns,
mapped_fsuid(req->r_mnt_idmap,
&init_user_ns)));
if (dir->i_mode & S_ISGID) {
in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));
@ -663,7 +678,9 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
if (S_ISDIR(mode))
mode |= S_ISGID;
} else {
in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
in.gid = cpu_to_le32(from_kgid(&init_user_ns,
mapped_fsgid(req->r_mnt_idmap,
&init_user_ns)));
}
in.mode = cpu_to_le32((u32)mode);
@ -683,7 +700,7 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
req->r_fmode, NULL);
up_read(&mdsc->snap_rwsem);
if (ret) {
dout("%s failed to fill inode: %d\n", __func__, ret);
doutc(cl, "failed to fill inode: %d\n", ret);
ceph_dir_clear_complete(dir);
if (!d_unhashed(dentry))
d_drop(dentry);
@ -691,8 +708,8 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
} else {
struct dentry *dn;
dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
vino.ino, ceph_ino(dir), dentry->d_name.name);
doutc(cl, "d_adding new inode 0x%llx to 0x%llx/%s\n",
vino.ino, ceph_ino(dir), dentry->d_name.name);
ceph_dir_clear_ordered(dir);
ceph_init_inode_acls(inode, as_ctx);
if (inode->i_state & I_NEW) {
@ -730,7 +747,9 @@ static int ceph_finish_async_create(struct inode *dir, struct inode *inode,
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
struct file *file, unsigned flags, umode_t mode)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct mnt_idmap *idmap = file_mnt_idmap(file);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(dir->i_sb);
struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
struct inode *new_inode = NULL;
@ -740,9 +759,9 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
int mask;
int err;
dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
dir, dentry, dentry,
d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
doutc(cl, "%p %llx.%llx dentry %p '%pd' %s flags %d mode 0%o\n",
dir, ceph_vinop(dir), dentry, dentry,
d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);
if (dentry->d_name.len > NAME_MAX)
return -ENAMETOOLONG;
@ -788,6 +807,8 @@ retry:
mask |= CEPH_CAP_XATTR_SHARED;
req->r_args.open.mask = cpu_to_le32(mask);
req->r_parent = dir;
if (req->r_op == CEPH_MDS_OP_CREATE)
req->r_mnt_idmap = mnt_idmap_get(idmap);
ihold(dir);
if (IS_ENCRYPTED(dir)) {
set_bit(CEPH_MDS_R_FSCRYPT_FILE, &req->r_req_flags);
@ -880,17 +901,18 @@ retry:
goto out_req;
if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
/* make vfs retry on splice, ENOENT, or symlink */
dout("atomic_open finish_no_open on dn %p\n", dn);
doutc(cl, "finish_no_open on dn %p\n", dn);
err = finish_no_open(file, dn);
} else {
if (IS_ENCRYPTED(dir) &&
!fscrypt_has_permitted_context(dir, d_inode(dentry))) {
pr_warn("Inconsistent encryption context (parent %llx:%llx child %llx:%llx)\n",
pr_warn_client(cl,
"Inconsistent encryption context (parent %llx:%llx child %llx:%llx)\n",
ceph_vinop(dir), ceph_vinop(d_inode(dentry)));
goto out_req;
}
dout("atomic_open finish_open on dn %p\n", dn);
doutc(cl, "finish_open on dn %p\n", dn);
if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
struct inode *newino = d_inode(dentry);
@ -905,17 +927,19 @@ out_req:
iput(new_inode);
out_ctx:
ceph_release_acl_sec_ctx(&as_ctx);
dout("atomic_open result=%d\n", err);
doutc(cl, "result=%d\n", err);
return err;
}
int ceph_release(struct inode *inode, struct file *file)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
if (S_ISDIR(inode->i_mode)) {
struct ceph_dir_file_info *dfi = file->private_data;
dout("release inode %p dir file %p\n", inode, file);
doutc(cl, "%p %llx.%llx dir file %p\n", inode,
ceph_vinop(inode), file);
WARN_ON(!list_empty(&dfi->file_info.rw_contexts));
ceph_put_fmode(ci, dfi->file_info.fmode, 1);
@ -927,7 +951,8 @@ int ceph_release(struct inode *inode, struct file *file)
kmem_cache_free(ceph_dir_file_cachep, dfi);
} else {
struct ceph_file_info *fi = file->private_data;
dout("release inode %p regular file %p\n", inode, file);
doutc(cl, "%p %llx.%llx regular file %p\n", inode,
ceph_vinop(inode), file);
WARN_ON(!list_empty(&fi->rw_contexts));
ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
@ -962,7 +987,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
u64 *last_objver)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
struct ceph_osd_client *osdc = &fsc->client->osdc;
ssize_t ret;
u64 off = *ki_pos;
@ -971,7 +997,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
bool sparse = IS_ENCRYPTED(inode) || ceph_test_mount_opt(fsc, SPARSEREAD);
u64 objver = 0;
dout("sync_read on inode %p %llx~%llx\n", inode, *ki_pos, len);
doutc(cl, "on inode %p %llx.%llx %llx~%llx\n", inode,
ceph_vinop(inode), *ki_pos, len);
if (ceph_inode_is_shutdown(inode))
return -EIO;
@ -1005,8 +1032,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
/* determine new offset/length if encrypted */
ceph_fscrypt_adjust_off_and_len(inode, &read_off, &read_len);
dout("sync_read orig %llu~%llu reading %llu~%llu",
off, len, read_off, read_len);
doutc(cl, "orig %llu~%llu reading %llu~%llu", off, len,
read_off, read_len);
req = ceph_osdc_new_request(osdc, &ci->i_layout,
ci->i_vino, read_off, &read_len, 0, 1,
@ -1059,8 +1086,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
objver = req->r_version;
i_size = i_size_read(inode);
dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
off, len, ret, i_size, (more ? " MORE" : ""));
doutc(cl, "%llu~%llu got %zd i_size %llu%s\n", off, len,
ret, i_size, (more ? " MORE" : ""));
/* Fix it to go to end of extent map */
if (sparse && ret >= 0)
@ -1101,8 +1128,8 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
int zlen = min(len - ret, i_size - off - ret);
int zoff = page_off + ret;
dout("sync_read zero gap %llu~%llu\n",
off + ret, off + ret + zlen);
doutc(cl, "zero gap %llu~%llu\n", off + ret,
off + ret + zlen);
ceph_zero_page_vector_range(zoff, zlen, pages);
ret += zlen;
}
@ -1151,7 +1178,7 @@ ssize_t __ceph_sync_read(struct inode *inode, loff_t *ki_pos,
if (last_objver)
*last_objver = objver;
}
dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
doutc(cl, "result %zd retry_op %d\n", ret, *retry_op);
return ret;
}
@ -1160,9 +1187,11 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct ceph_client *cl = ceph_inode_to_client(inode);
dout("sync_read on file %p %llx~%zx %s\n", file, iocb->ki_pos,
iov_iter_count(to), (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
doutc(cl, "on file %p %llx~%zx %s\n", file, iocb->ki_pos,
iov_iter_count(to),
(file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
return __ceph_sync_read(inode, &iocb->ki_pos, to, retry_op, NULL);
}
@ -1190,6 +1219,7 @@ static void ceph_aio_retry_work(struct work_struct *work);
static void ceph_aio_complete(struct inode *inode,
struct ceph_aio_request *aio_req)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
int ret;
@ -1203,7 +1233,7 @@ static void ceph_aio_complete(struct inode *inode,
if (!ret)
ret = aio_req->total_len;
dout("ceph_aio_complete %p rc %d\n", inode, ret);
doutc(cl, "%p %llx.%llx rc %d\n", inode, ceph_vinop(inode), ret);
if (ret >= 0 && aio_req->write) {
int dirty;
@ -1242,11 +1272,13 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
unsigned int len = osd_data->bvec_pos.iter.bi_size;
bool sparse = (op->op == CEPH_OSD_OP_SPARSE_READ);
struct ceph_client *cl = ceph_inode_to_client(inode);
BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
BUG_ON(!osd_data->num_bvecs);
dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);
doutc(cl, "req %p inode %p %llx.%llx, rc %d bytes %u\n", req,
inode, ceph_vinop(inode), rc, len);
if (rc == -EOLDSNAPC) {
struct ceph_aio_work *aio_work;
@ -1256,7 +1288,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
if (aio_work) {
INIT_WORK(&aio_work->work, ceph_aio_retry_work);
aio_work->req = req;
queue_work(ceph_inode_to_client(inode)->inode_wq,
queue_work(ceph_inode_to_fs_client(inode)->inode_wq,
&aio_work->work);
return;
}
@ -1386,7 +1418,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
struct ceph_client_metric *metric = &fsc->mdsc->metric;
struct ceph_vino vino;
struct ceph_osd_request *req;
@ -1405,9 +1438,9 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
return -EROFS;
dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
(write ? "write" : "read"), file, pos, (unsigned)count,
snapc, snapc ? snapc->seq : 0);
doutc(cl, "sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
(write ? "write" : "read"), file, pos, (unsigned)count,
snapc, snapc ? snapc->seq : 0);
if (write) {
int ret2;
@ -1418,7 +1451,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
pos >> PAGE_SHIFT,
(pos + count - 1) >> PAGE_SHIFT);
if (ret2 < 0)
dout("invalidate_inode_pages2_range returned %d\n", ret2);
doutc(cl, "invalidate_inode_pages2_range returned %d\n",
ret2);
flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
} else {
@ -1610,7 +1644,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
struct file *file = iocb->ki_filp;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_osd_request *req;
struct page **pages;
@ -1625,8 +1660,8 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
return -EROFS;
dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
file, pos, (unsigned)count, snapc, snapc->seq);
doutc(cl, "on file %p %lld~%u snapc %p seq %lld\n", file, pos,
(unsigned)count, snapc, snapc->seq);
ret = filemap_write_and_wait_range(inode->i_mapping,
pos, pos + count - 1);
@ -1670,9 +1705,9 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
last = (pos + len) != (write_pos + write_len);
rmw = first || last;
dout("sync_write ino %llx %lld~%llu adjusted %lld~%llu -- %srmw\n",
ci->i_vino.ino, pos, len, write_pos, write_len,
rmw ? "" : "no ");
doutc(cl, "ino %llx %lld~%llu adjusted %lld~%llu -- %srmw\n",
ci->i_vino.ino, pos, len, write_pos, write_len,
rmw ? "" : "no ");
/*
* The data is emplaced into the page as it would be if it were
@ -1881,7 +1916,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
left -= ret;
}
if (ret < 0) {
dout("sync_write write failed with %d\n", ret);
doutc(cl, "write failed with %d\n", ret);
ceph_release_page_vector(pages, num_pages);
break;
}
@ -1891,7 +1926,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
write_pos, write_len,
GFP_KERNEL);
if (ret < 0) {
dout("encryption failed with %d\n", ret);
doutc(cl, "encryption failed with %d\n", ret);
ceph_release_page_vector(pages, num_pages);
break;
}
@ -1910,7 +1945,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
break;
}
dout("sync_write write op %lld~%llu\n", write_pos, write_len);
doutc(cl, "write op %lld~%llu\n", write_pos, write_len);
osd_req_op_extent_osd_data_pages(req, rmw ? 1 : 0, pages, write_len,
offset_in_page(write_pos), false,
true);
@ -1941,7 +1976,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
req->r_end_latency, len, ret);
ceph_osdc_put_request(req);
if (ret != 0) {
dout("sync_write osd write returned %d\n", ret);
doutc(cl, "osd write returned %d\n", ret);
/* Version changed! Must re-do the rmw cycle */
if ((assert_ver && (ret == -ERANGE || ret == -EOVERFLOW)) ||
(!assert_ver && ret == -EEXIST)) {
@ -1971,13 +2006,13 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
pos >> PAGE_SHIFT,
(pos + len - 1) >> PAGE_SHIFT);
if (ret < 0) {
dout("invalidate_inode_pages2_range returned %d\n",
ret);
doutc(cl, "invalidate_inode_pages2_range returned %d\n",
ret);
ret = 0;
}
pos += len;
written += len;
dout("sync_write written %d\n", written);
doutc(cl, "written %d\n", written);
if (pos > i_size_read(inode)) {
check_caps = ceph_inode_set_size(inode, pos);
if (check_caps)
@ -1991,7 +2026,7 @@ ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
ret = written;
iocb->ki_pos = pos;
}
dout("sync_write returning %d\n", ret);
doutc(cl, "returning %d\n", ret);
return ret;
}
@ -2010,13 +2045,14 @@ static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
struct inode *inode = file_inode(filp);
struct ceph_inode_info *ci = ceph_inode(inode);
bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
struct ceph_client *cl = ceph_inode_to_client(inode);
ssize_t ret;
int want = 0, got = 0;
int retry_op = 0, read = 0;
again:
dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
doutc(cl, "%llu~%u trying to get caps on %p %llx.%llx\n",
iocb->ki_pos, (unsigned)len, inode, ceph_vinop(inode));
if (ceph_inode_is_shutdown(inode))
return -ESTALE;
@ -2044,9 +2080,9 @@ again:
(iocb->ki_flags & IOCB_DIRECT) ||
(fi->flags & CEPH_F_SYNC)) {
dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
ceph_cap_string(got));
doutc(cl, "sync %p %llx.%llx %llu~%u got cap refs on %s\n",
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
ceph_cap_string(got));
if (!ceph_has_inline_data(ci)) {
if (!retry_op &&
@ -2064,16 +2100,16 @@ again:
}
} else {
CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
ceph_cap_string(got));
doutc(cl, "async %p %llx.%llx %llu~%u got cap refs on %s\n",
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
ceph_cap_string(got));
ceph_add_rw_context(fi, &rw_ctx);
ret = generic_file_read_iter(iocb, to);
ceph_del_rw_context(fi, &rw_ctx);
}
dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
doutc(cl, "%p %llx.%llx dropping cap refs on %s = %d\n",
inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
ceph_put_cap_refs(ci, got);
if (direct_lock)
@ -2133,8 +2169,8 @@ again:
/* hit EOF or hole? */
if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
ret < len) {
dout("sync_read hit hole, ppos %lld < size %lld"
", reading more\n", iocb->ki_pos, i_size);
doutc(cl, "hit hole, ppos %lld < size %lld, reading more\n",
iocb->ki_pos, i_size);
read += ret;
len -= ret;
@ -2228,7 +2264,8 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_client *cl = fsc->client;
struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_cap_flush *prealloc_cf;
ssize_t count, written = 0;
@ -2296,8 +2333,9 @@ retry_snap:
if (err)
goto out;
dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
inode, ceph_vinop(inode), pos, count, i_size_read(inode));
doutc(cl, "%p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
inode, ceph_vinop(inode), pos, count,
i_size_read(inode));
if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
want |= CEPH_CAP_FILE_BUFFER;
if (fi->fmode & CEPH_FILE_MODE_LAZY)
@ -2313,8 +2351,8 @@ retry_snap:
inode_inc_iversion_raw(inode);
dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
doutc(cl, "%p %llx.%llx %llu~%zd got cap refs on %s\n",
inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));
if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
(iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
@ -2374,14 +2412,14 @@ retry_snap:
ceph_check_caps(ci, CHECK_CAPS_FLUSH);
}
dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
inode, ceph_vinop(inode), pos, (unsigned)count,
ceph_cap_string(got));
doutc(cl, "%p %llx.%llx %llu~%u dropping cap refs on %s\n",
inode, ceph_vinop(inode), pos, (unsigned)count,
ceph_cap_string(got));
ceph_put_cap_refs(ci, got);
if (written == -EOLDSNAPC) {
dout("aio_write %p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
inode, ceph_vinop(inode), pos, (unsigned)count);
doutc(cl, "%p %llx.%llx %llu~%u" "got EOLDSNAPC, retrying\n",
inode, ceph_vinop(inode), pos, (unsigned)count);
goto retry_snap;
}
@ -2462,7 +2500,7 @@ static int ceph_zero_partial_object(struct inode *inode,
loff_t offset, loff_t *length)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
struct ceph_osd_request *req;
int ret = 0;
loff_t zero = 0;
@ -2553,14 +2591,15 @@ static long ceph_fallocate(struct file *file, int mode,
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_cap_flush *prealloc_cf;
struct ceph_client *cl = ceph_inode_to_client(inode);
int want, got = 0;
int dirty;
int ret = 0;
loff_t endoff = 0;
loff_t size;
dout("%s %p %llx.%llx mode %x, offset %llu length %llu\n", __func__,
inode, ceph_vinop(inode), mode, offset, length);
doutc(cl, "%p %llx.%llx mode %x, offset %llu length %llu\n",
inode, ceph_vinop(inode), mode, offset, length);
if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
return -EOPNOTSUPP;
@ -2689,6 +2728,7 @@ static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
loff_t src_off, loff_t dst_off, size_t len)
{
struct ceph_client *cl = ceph_inode_to_client(src_inode);
loff_t size, endoff;
size = i_size_read(src_inode);
@ -2699,8 +2739,8 @@ static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
* inode.
*/
if (src_off + len > size) {
dout("Copy beyond EOF (%llu + %zu > %llu)\n",
src_off, len, size);
doutc(cl, "Copy beyond EOF (%llu + %zu > %llu)\n", src_off,
len, size);
return -EOPNOTSUPP;
}
size = i_size_read(dst_inode);
@ -2776,6 +2816,7 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
u32 src_objlen, dst_objlen;
u32 object_size = src_ci->i_layout.object_size;
struct ceph_client *cl = fsc->client;
int ret;
src_oloc.pool = src_ci->i_layout.pool_id;
@ -2817,9 +2858,10 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
if (ret) {
if (ret == -EOPNOTSUPP) {
fsc->have_copy_from2 = false;
pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
pr_notice_client(cl,
"OSDs don't support copy-from2; disabling copy offload\n");
}
dout("ceph_osdc_copy_from returned %d\n", ret);
doutc(cl, "returned %d\n", ret);
if (!bytes)
bytes = ret;
goto out;
@ -2845,7 +2887,8 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
struct ceph_inode_info *src_ci = ceph_inode(src_inode);
struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
struct ceph_cap_flush *prealloc_cf;
struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
struct ceph_fs_client *src_fsc = ceph_inode_to_fs_client(src_inode);
struct ceph_client *cl = src_fsc->client;
loff_t size;
ssize_t ret = -EIO, bytes;
u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
@ -2853,7 +2896,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
int src_got = 0, dst_got = 0, err, dirty;
if (src_inode->i_sb != dst_inode->i_sb) {
struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);
struct ceph_fs_client *dst_fsc = ceph_inode_to_fs_client(dst_inode);
if (ceph_fsid_compare(&src_fsc->client->fsid,
&dst_fsc->client->fsid)) {
@ -2888,7 +2931,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
(src_ci->i_layout.stripe_count != 1) ||
(dst_ci->i_layout.stripe_count != 1) ||
(src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
dout("Invalid src/dst files layout\n");
doutc(cl, "Invalid src/dst files layout\n");
return -EOPNOTSUPP;
}
@ -2906,12 +2949,12 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
/* Start by sync'ing the source and destination files */
ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
if (ret < 0) {
dout("failed to write src file (%zd)\n", ret);
doutc(cl, "failed to write src file (%zd)\n", ret);
goto out;
}
ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
if (ret < 0) {
dout("failed to write dst file (%zd)\n", ret);
doutc(cl, "failed to write dst file (%zd)\n", ret);
goto out;
}
@ -2923,7 +2966,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
err = get_rd_wr_caps(src_file, &src_got,
dst_file, (dst_off + len), &dst_got);
if (err < 0) {
dout("get_rd_wr_caps returned %d\n", err);
doutc(cl, "get_rd_wr_caps returned %d\n", err);
ret = -EOPNOTSUPP;
goto out;
}
@ -2938,7 +2981,8 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
dst_off >> PAGE_SHIFT,
(dst_off + len) >> PAGE_SHIFT);
if (ret < 0) {
dout("Failed to invalidate inode pages (%zd)\n", ret);
doutc(cl, "Failed to invalidate inode pages (%zd)\n",
ret);
ret = 0; /* XXX */
}
ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
@ -2959,7 +3003,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
* starting at the src_off
*/
if (src_objoff) {
dout("Initial partial copy of %u bytes\n", src_objlen);
doutc(cl, "Initial partial copy of %u bytes\n", src_objlen);
/*
* we need to temporarily drop all caps as we'll be calling
@ -2970,7 +3014,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
&dst_off, src_objlen, flags);
/* Abort on short copies or on error */
if (ret < (long)src_objlen) {
dout("Failed partial copy (%zd)\n", ret);
doutc(cl, "Failed partial copy (%zd)\n", ret);
goto out;
}
len -= ret;
@ -2992,7 +3036,7 @@ static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
ret = bytes;
goto out_caps;
}
dout("Copied %zu bytes out of %zu\n", bytes, len);
doutc(cl, "Copied %zu bytes out of %zu\n", bytes, len);
len -= bytes;
ret += bytes;
@ -3020,13 +3064,13 @@ out_caps:
* there were errors in remote object copies (len >= object_size).
*/
if (len && (len < src_ci->i_layout.object_size)) {
dout("Final partial copy of %zu bytes\n", len);
doutc(cl, "Final partial copy of %zu bytes\n", len);
bytes = do_splice_direct(src_file, &src_off, dst_file,
&dst_off, len, flags);
if (bytes > 0)
ret += bytes;
else
dout("Failed partial copy (%zd)\n", bytes);
doutc(cl, "Failed partial copy (%zd)\n", bytes);
}
out:

File diff suppressed because it is too large


@ -65,7 +65,7 @@ static long __validate_layout(struct ceph_mds_client *mdsc,
static long ceph_ioctl_set_layout(struct file *file, void __user *arg)
{
struct inode *inode = file_inode(file);
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
struct ceph_mds_request *req;
struct ceph_ioctl_layout l;
struct ceph_inode_info *ci = ceph_inode(file_inode(file));
@ -140,7 +140,7 @@ static long ceph_ioctl_set_layout_policy (struct file *file, void __user *arg)
struct ceph_mds_request *req;
struct ceph_ioctl_layout l;
int err;
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
/* copy and validate */
if (copy_from_user(&l, arg, sizeof(l)))
@ -183,7 +183,7 @@ static long ceph_ioctl_get_dataloc(struct file *file, void __user *arg)
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_osd_client *osdc =
&ceph_sb_to_client(inode->i_sb)->client->osdc;
&ceph_sb_to_fs_client(inode->i_sb)->client->osdc;
struct ceph_object_locator oloc;
CEPH_DEFINE_OID_ONSTACK(oid);
u32 xlen;
@ -244,7 +244,8 @@ static long ceph_ioctl_lazyio(struct file *file)
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
struct ceph_client *cl = mdsc->fsc->client;
if ((fi->fmode & CEPH_FILE_MODE_LAZY) == 0) {
spin_lock(&ci->i_ceph_lock);
@ -252,11 +253,13 @@ static long ceph_ioctl_lazyio(struct file *file)
ci->i_nr_by_mode[ffs(CEPH_FILE_MODE_LAZY)]++;
__ceph_touch_fmode(ci, mdsc, fi->fmode);
spin_unlock(&ci->i_ceph_lock);
dout("ioctl_layzio: file %p marked lazy\n", file);
doutc(cl, "file %p %p %llx.%llx marked lazy\n", file, inode,
ceph_vinop(inode));
ceph_check_caps(ci, 0);
} else {
dout("ioctl_layzio: file %p already lazy\n", file);
doutc(cl, "file %p %p %llx.%llx already lazy\n", file, inode,
ceph_vinop(inode));
}
return 0;
}
@ -355,10 +358,12 @@ static const char *ceph_ioctl_cmd_name(const unsigned int cmd)
long ceph_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct inode *inode = file_inode(file);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
int ret;
dout("ioctl file %p cmd %s arg %lu\n", file,
ceph_ioctl_cmd_name(cmd), arg);
doutc(fsc->client, "file %p %p %llx.%llx cmd %s arg %lu\n", file,
inode, ceph_vinop(inode), ceph_ioctl_cmd_name(cmd), arg);
switch (cmd) {
case CEPH_IOC_GET_LAYOUT:
return ceph_ioctl_get_layout(file, (void __user *)arg);

View File

@ -77,6 +77,7 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
int cmd, u8 wait, struct file_lock *fl)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *req;
int err;
u64 length = 0;
@ -111,10 +112,10 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
owner = secure_addr(fl->fl_owner);
dout("ceph_lock_message: rule: %d, op: %d, owner: %llx, pid: %llu, "
"start: %llu, length: %llu, wait: %d, type: %d\n", (int)lock_type,
(int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length,
wait, fl->fl_type);
doutc(cl, "rule: %d, op: %d, owner: %llx, pid: %llu, "
"start: %llu, length: %llu, wait: %d, type: %d\n",
(int)lock_type, (int)operation, owner, (u64)fl->fl_pid,
fl->fl_start, length, wait, fl->fl_type);
req->r_args.filelock_change.rule = lock_type;
req->r_args.filelock_change.type = cmd;
@ -147,16 +148,17 @@ static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
}
ceph_mdsc_put_request(req);
dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
"length: %llu, wait: %d, type: %d, err code %d\n", (int)lock_type,
(int)operation, (u64)fl->fl_pid, fl->fl_start,
length, wait, fl->fl_type, err);
doutc(cl, "rule: %d, op: %d, pid: %llu, start: %llu, "
"length: %llu, wait: %d, type: %d, err code %d\n",
(int)lock_type, (int)operation, (u64)fl->fl_pid,
fl->fl_start, length, wait, fl->fl_type, err);
return err;
}
static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
struct ceph_mds_request *req)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_request *intr_req;
struct inode *inode = req->r_inode;
int err, lock_type;
@ -174,8 +176,7 @@ static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
if (!err)
return 0;
dout("ceph_lock_wait_for_completion: request %llu was interrupted\n",
req->r_tid);
doutc(cl, "request %llu was interrupted\n", req->r_tid);
mutex_lock(&mdsc->mutex);
if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
@ -246,6 +247,7 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_client *cl = ceph_inode_to_client(inode);
int err = 0;
u16 op = CEPH_MDS_OP_SETFILELOCK;
u8 wait = 0;
@ -257,7 +259,7 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
if (ceph_inode_is_shutdown(inode))
return -ESTALE;
dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);
doutc(cl, "fl_owner: %p\n", fl->fl_owner);
/* set wait bit as appropriate, then make command as Ceph expects it*/
if (IS_GETLK(cmd))
@ -292,7 +294,7 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl);
if (!err) {
if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK != fl->fl_type) {
dout("mds locked, locking locally\n");
doutc(cl, "locking locally\n");
err = posix_lock_file(file, fl, NULL);
if (err) {
/* undo! This should only happen if
@ -300,8 +302,8 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
* deadlock. */
ceph_lock_message(CEPH_LOCK_FCNTL, op, inode,
CEPH_LOCK_UNLOCK, 0, fl);
dout("got %d on posix_lock_file, undid lock\n",
err);
doutc(cl, "got %d on posix_lock_file, undid lock\n",
err);
}
}
}
@ -312,6 +314,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_client *cl = ceph_inode_to_client(inode);
int err = 0;
u8 wait = 0;
u8 lock_cmd;
@ -322,7 +325,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
if (ceph_inode_is_shutdown(inode))
return -ESTALE;
dout("ceph_flock, fl_file: %p\n", fl->fl_file);
doutc(cl, "fl_file: %p\n", fl->fl_file);
spin_lock(&ci->i_ceph_lock);
if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) {
@ -359,7 +362,8 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
ceph_lock_message(CEPH_LOCK_FLOCK,
CEPH_MDS_OP_SETFILELOCK,
inode, CEPH_LOCK_UNLOCK, 0, fl);
dout("got %d on locks_lock_file_wait, undid lock\n", err);
doutc(cl, "got %d on locks_lock_file_wait, undid lock\n",
err);
}
}
return err;
@ -371,6 +375,7 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
*/
void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
struct file_lock *lock;
struct file_lock_context *ctx;
@ -386,17 +391,20 @@ void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
++(*flock_count);
spin_unlock(&ctx->flc_lock);
}
dout("counted %d flock locks and %d fcntl locks\n",
*flock_count, *fcntl_count);
doutc(cl, "counted %d flock locks and %d fcntl locks\n",
*flock_count, *fcntl_count);
}
/*
* Given a pointer to a lock, convert it to a ceph filelock
*/
static int lock_to_ceph_filelock(struct file_lock *lock,
static int lock_to_ceph_filelock(struct inode *inode,
struct file_lock *lock,
struct ceph_filelock *cephlock)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
int err = 0;
cephlock->start = cpu_to_le64(lock->fl_start);
cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
cephlock->client = cpu_to_le64(0);
@ -414,7 +422,7 @@ static int lock_to_ceph_filelock(struct file_lock *lock,
cephlock->type = CEPH_LOCK_UNLOCK;
break;
default:
dout("Have unknown lock type %d\n", lock->fl_type);
doutc(cl, "Have unknown lock type %d\n", lock->fl_type);
err = -EINVAL;
}
@ -432,13 +440,14 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
{
struct file_lock *lock;
struct file_lock_context *ctx = locks_inode_context(inode);
struct ceph_client *cl = ceph_inode_to_client(inode);
int err = 0;
int seen_fcntl = 0;
int seen_flock = 0;
int l = 0;
dout("encoding %d flock and %d fcntl locks\n", num_flock_locks,
num_fcntl_locks);
doutc(cl, "encoding %d flock and %d fcntl locks\n", num_flock_locks,
num_fcntl_locks);
if (!ctx)
return 0;
@ -450,7 +459,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
err = -ENOSPC;
goto fail;
}
err = lock_to_ceph_filelock(lock, &flocks[l]);
err = lock_to_ceph_filelock(inode, lock, &flocks[l]);
if (err)
goto fail;
++l;
@ -461,7 +470,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
err = -ENOSPC;
goto fail;
}
err = lock_to_ceph_filelock(lock, &flocks[l]);
err = lock_to_ceph_filelock(inode, lock, &flocks[l]);
if (err)
goto fail;
++l;

File diff suppressed because it is too large

View File

@ -14,9 +14,9 @@
#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/mdsmap.h>
#include <linux/ceph/auth.h>
#include "mdsmap.h"
#include "metric.h"
#include "super.h"
@ -33,8 +33,10 @@ enum ceph_feature_type {
CEPHFS_FEATURE_NOTIFY_SESSION_STATE,
CEPHFS_FEATURE_OP_GETVXATTR,
CEPHFS_FEATURE_32BITS_RETRY_FWD,
CEPHFS_FEATURE_NEW_SNAPREALM_INFO,
CEPHFS_FEATURE_HAS_OWNER_UIDGID,
CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_32BITS_RETRY_FWD,
CEPHFS_FEATURE_MAX = CEPHFS_FEATURE_HAS_OWNER_UIDGID,
};
#define CEPHFS_FEATURES_CLIENT_SUPPORTED { \
@ -49,6 +51,7 @@ enum ceph_feature_type {
CEPHFS_FEATURE_NOTIFY_SESSION_STATE, \
CEPHFS_FEATURE_OP_GETVXATTR, \
CEPHFS_FEATURE_32BITS_RETRY_FWD, \
CEPHFS_FEATURE_HAS_OWNER_UIDGID, \
}
/*
@ -300,6 +303,7 @@ struct ceph_mds_request {
int r_fmode; /* file mode, if expecting cap */
int r_request_release_offset;
const struct cred *r_cred;
struct mnt_idmap *r_mnt_idmap;
struct timespec64 r_stamp;
/* for choosing which mds to send this request to */
@ -581,7 +585,8 @@ static inline void ceph_mdsc_free_path(char *path, int len)
__putname(path - (PATH_MAX - 1 - len));
}
extern char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *base,
extern char *ceph_mdsc_build_path(struct ceph_mds_client *mdsc,
struct dentry *dentry, int *plen, u64 *base,
int for_wire);
extern void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry);
@ -614,4 +619,6 @@ static inline int ceph_wait_on_async_create(struct inode *inode)
extern int ceph_wait_on_conflict_unlink(struct dentry *dentry);
extern u64 ceph_get_deleg_ino(struct ceph_mds_session *session);
extern int ceph_restore_deleg_ino(struct ceph_mds_session *session, u64 ino);
extern bool enable_unsafe_idmap;
#endif

View File

@ -7,10 +7,11 @@
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/ceph/mdsmap.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include "mdsmap.h"
#include "mds_client.h"
#include "super.h"
#define CEPH_MDS_IS_READY(i, ignore_laggy) \
@ -114,8 +115,10 @@ bad:
* Ignore any fields we don't care about (there are quite a few of
* them).
*/
struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
void *end, bool msgr2)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mdsmap *m;
const void *start = *p;
int i, j, n;
@ -233,20 +236,18 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
*p = info_end;
}
dout("mdsmap_decode %d/%d %lld mds%d.%d %s %s%s\n",
i+1, n, global_id, mds, inc,
ceph_pr_addr(&addr),
ceph_mds_state_name(state),
laggy ? "(laggy)" : "");
doutc(cl, "%d/%d %lld mds%d.%d %s %s%s\n", i+1, n, global_id,
mds, inc, ceph_pr_addr(&addr),
ceph_mds_state_name(state), laggy ? "(laggy)" : "");
if (mds < 0 || mds >= m->possible_max_rank) {
pr_warn("mdsmap_decode got incorrect mds(%d)\n", mds);
pr_warn_client(cl, "got incorrect mds(%d)\n", mds);
continue;
}
if (state <= 0) {
dout("mdsmap_decode got incorrect state(%s)\n",
ceph_mds_state_name(state));
doutc(cl, "got incorrect state(%s)\n",
ceph_mds_state_name(state));
continue;
}
@ -385,16 +386,16 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
m->m_max_xattr_size = 0;
}
bad_ext:
dout("mdsmap_decode m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
!!m->m_enabled, !!m->m_damaged, m->m_num_laggy);
doutc(cl, "m_enabled: %d, m_damaged: %d, m_num_laggy: %d\n",
!!m->m_enabled, !!m->m_damaged, m->m_num_laggy);
*p = end;
dout("mdsmap_decode success epoch %u\n", m->m_epoch);
doutc(cl, "success epoch %u\n", m->m_epoch);
return m;
nomem:
err = -ENOMEM;
goto out_err;
corrupt:
pr_err("corrupt mdsmap\n");
pr_err_client(cl, "corrupt mdsmap\n");
print_hex_dump(KERN_DEBUG, "mdsmap: ",
DUMP_PREFIX_OFFSET, 16, 1,
start, end - start, true);

View File

@ -5,6 +5,8 @@
#include <linux/bug.h>
#include <linux/ceph/types.h>
struct ceph_mds_client;
/*
* mds map - describe servers in the mds cluster.
*
@ -65,7 +67,8 @@ static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
}
extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m);
struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2);
struct ceph_mdsmap *ceph_mdsmap_decode(struct ceph_mds_client *mdsc, void **p,
void *end, bool msgr2);
extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m);
extern bool ceph_mdsmap_is_cluster_available(struct ceph_mdsmap *m);

View File

@ -31,6 +31,7 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
struct ceph_client_metric *m = &mdsc->metric;
u64 nr_caps = atomic64_read(&m->total_caps);
u32 header_len = sizeof(struct ceph_metric_header);
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_msg *msg;
s64 sum;
s32 items = 0;
@ -51,8 +52,8 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
if (!msg) {
pr_err("send metrics to mds%d, failed to allocate message\n",
s->s_mds);
pr_err_client(cl, "to mds%d, failed to allocate message\n",
s->s_mds);
return false;
}

View File

@ -43,6 +43,7 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
{
struct super_block *sb = mdsc->fsc->sb;
struct ceph_mds_quota *h = msg->front.iov_base;
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_vino vino;
struct inode *inode;
struct ceph_inode_info *ci;
@ -51,8 +52,8 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
return;
if (msg->front.iov_len < sizeof(*h)) {
pr_err("%s corrupt message mds%d len %d\n", __func__,
session->s_mds, (int)msg->front.iov_len);
pr_err_client(cl, "corrupt message mds%d len %d\n",
session->s_mds, (int)msg->front.iov_len);
ceph_msg_dump(msg);
goto out;
}
@ -62,7 +63,7 @@ void ceph_handle_quota(struct ceph_mds_client *mdsc,
vino.snap = CEPH_NOSNAP;
inode = ceph_find_inode(sb, vino);
if (!inode) {
pr_warn("Failed to find inode %llu\n", vino.ino);
pr_warn_client(cl, "failed to find inode %llx\n", vino.ino);
goto out;
}
ci = ceph_inode(inode);
@ -85,6 +86,7 @@ find_quotarealm_inode(struct ceph_mds_client *mdsc, u64 ino)
{
struct ceph_quotarealm_inode *qri = NULL;
struct rb_node **node, *parent = NULL;
struct ceph_client *cl = mdsc->fsc->client;
mutex_lock(&mdsc->quotarealms_inodes_mutex);
node = &(mdsc->quotarealms_inodes.rb_node);
@ -110,7 +112,7 @@ find_quotarealm_inode(struct ceph_mds_client *mdsc, u64 ino)
rb_link_node(&qri->node, parent, node);
rb_insert_color(&qri->node, &mdsc->quotarealms_inodes);
} else
pr_warn("Failed to alloc quotarealms_inode\n");
pr_warn_client(cl, "Failed to alloc quotarealms_inode\n");
}
mutex_unlock(&mdsc->quotarealms_inodes_mutex);
@ -129,6 +131,7 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
struct super_block *sb,
struct ceph_snap_realm *realm)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_quotarealm_inode *qri;
struct inode *in;
@ -161,8 +164,8 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
}
if (IS_ERR(in)) {
dout("Can't lookup inode %llx (err: %ld)\n",
realm->ino, PTR_ERR(in));
doutc(cl, "Can't lookup inode %llx (err: %ld)\n", realm->ino,
PTR_ERR(in));
qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */
} else {
qri->timeout = 0;
@ -213,6 +216,7 @@ static struct ceph_snap_realm *get_quota_realm(struct ceph_mds_client *mdsc,
enum quota_get_realm which_quota,
bool retry)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci = NULL;
struct ceph_snap_realm *realm, *next;
struct inode *in;
@ -226,8 +230,9 @@ restart:
if (realm)
ceph_get_snap_realm(mdsc, realm);
else
pr_err_ratelimited("get_quota_realm: ino (%llx.%llx) "
"null i_snap_realm\n", ceph_vinop(inode));
pr_err_ratelimited_client(cl,
"%p %llx.%llx null i_snap_realm\n",
inode, ceph_vinop(inode));
while (realm) {
bool has_inode;
@ -317,6 +322,7 @@ static bool check_quota_exceeded(struct inode *inode, enum quota_check_op op,
loff_t delta)
{
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct ceph_snap_realm *realm, *next;
struct inode *in;
@ -332,8 +338,9 @@ restart:
if (realm)
ceph_get_snap_realm(mdsc, realm);
else
pr_err_ratelimited("check_quota_exceeded: ino (%llx.%llx) "
"null i_snap_realm\n", ceph_vinop(inode));
pr_err_ratelimited_client(cl,
"%p %llx.%llx null i_snap_realm\n",
inode, ceph_vinop(inode));
while (realm) {
bool has_inode;
@ -383,7 +390,7 @@ restart:
break;
default:
/* Shouldn't happen */
pr_warn("Invalid quota check op (%d)\n", op);
pr_warn_client(cl, "Invalid quota check op (%d)\n", op);
exceeded = true; /* Just break the loop */
}
iput(in);

View File

@ -138,7 +138,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
__insert_snap_realm(&mdsc->snap_realms, realm);
mdsc->num_snap_realms++;
dout("%s %llx %p\n", __func__, realm->ino, realm);
doutc(mdsc->fsc->client, "%llx %p\n", realm->ino, realm);
return realm;
}
@ -150,6 +150,7 @@ static struct ceph_snap_realm *ceph_create_snap_realm(
static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
u64 ino)
{
struct ceph_client *cl = mdsc->fsc->client;
struct rb_node *n = mdsc->snap_realms.rb_node;
struct ceph_snap_realm *r;
@ -162,7 +163,7 @@ static struct ceph_snap_realm *__lookup_snap_realm(struct ceph_mds_client *mdsc,
else if (ino > r->ino)
n = n->rb_right;
else {
dout("%s %llx %p\n", __func__, r->ino, r);
doutc(cl, "%llx %p\n", r->ino, r);
return r;
}
}
@ -188,9 +189,10 @@ static void __put_snap_realm(struct ceph_mds_client *mdsc,
static void __destroy_snap_realm(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm)
{
struct ceph_client *cl = mdsc->fsc->client;
lockdep_assert_held_write(&mdsc->snap_rwsem);
dout("%s %p %llx\n", __func__, realm, realm->ino);
doutc(cl, "%p %llx\n", realm, realm->ino);
rb_erase(&realm->node, &mdsc->snap_realms);
mdsc->num_snap_realms--;
@ -290,6 +292,7 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm,
u64 parentino)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snap_realm *parent;
lockdep_assert_held_write(&mdsc->snap_rwsem);
@ -303,8 +306,8 @@ static int adjust_snap_realm_parent(struct ceph_mds_client *mdsc,
if (IS_ERR(parent))
return PTR_ERR(parent);
}
dout("%s %llx %p: %llx %p -> %llx %p\n", __func__, realm->ino,
realm, realm->parent_ino, realm->parent, parentino, parent);
doutc(cl, "%llx %p: %llx %p -> %llx %p\n", realm->ino, realm,
realm->parent_ino, realm->parent, parentino, parent);
if (realm->parent) {
list_del_init(&realm->child_item);
ceph_put_snap_realm(mdsc, realm->parent);
@ -329,10 +332,12 @@ static int cmpu64_rev(const void *a, const void *b)
/*
* build the snap context for a given realm.
*/
static int build_snap_context(struct ceph_snap_realm *realm,
static int build_snap_context(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm,
struct list_head *realm_queue,
struct list_head *dirty_realms)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snap_realm *parent = realm->parent;
struct ceph_snap_context *snapc;
int err = 0;
@ -360,10 +365,10 @@ static int build_snap_context(struct ceph_snap_realm *realm,
realm->cached_context->seq == realm->seq &&
(!parent ||
realm->cached_context->seq >= parent->cached_context->seq)) {
dout("%s %llx %p: %p seq %lld (%u snaps) (unchanged)\n",
__func__, realm->ino, realm, realm->cached_context,
realm->cached_context->seq,
(unsigned int)realm->cached_context->num_snaps);
doutc(cl, "%llx %p: %p seq %lld (%u snaps) (unchanged)\n",
realm->ino, realm, realm->cached_context,
realm->cached_context->seq,
(unsigned int)realm->cached_context->num_snaps);
return 0;
}
@ -400,8 +405,8 @@ static int build_snap_context(struct ceph_snap_realm *realm,
sort(snapc->snaps, num, sizeof(u64), cmpu64_rev, NULL);
snapc->num_snaps = num;
dout("%s %llx %p: %p seq %lld (%u snaps)\n", __func__, realm->ino,
realm, snapc, snapc->seq, (unsigned int) snapc->num_snaps);
doutc(cl, "%llx %p: %p seq %lld (%u snaps)\n", realm->ino, realm,
snapc, snapc->seq, (unsigned int) snapc->num_snaps);
ceph_put_snap_context(realm->cached_context);
realm->cached_context = snapc;
@ -418,16 +423,18 @@ fail:
ceph_put_snap_context(realm->cached_context);
realm->cached_context = NULL;
}
pr_err("%s %llx %p fail %d\n", __func__, realm->ino, realm, err);
pr_err_client(cl, "%llx %p fail %d\n", realm->ino, realm, err);
return err;
}
/*
* rebuild snap context for the given realm and all of its children.
*/
static void rebuild_snap_realms(struct ceph_snap_realm *realm,
static void rebuild_snap_realms(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm,
struct list_head *dirty_realms)
{
struct ceph_client *cl = mdsc->fsc->client;
LIST_HEAD(realm_queue);
int last = 0;
bool skip = false;
@ -451,9 +458,10 @@ static void rebuild_snap_realms(struct ceph_snap_realm *realm,
continue;
}
last = build_snap_context(_realm, &realm_queue, dirty_realms);
dout("%s %llx %p, %s\n", __func__, _realm->ino, _realm,
last > 0 ? "is deferred" : !last ? "succeeded" : "failed");
last = build_snap_context(mdsc, _realm, &realm_queue,
dirty_realms);
doutc(cl, "%llx %p, %s\n", realm->ino, realm,
last > 0 ? "is deferred" : !last ? "succeeded" : "failed");
/* is any child in the list ? */
list_for_each_entry(child, &_realm->children, child_item) {
@ -523,6 +531,7 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
struct ceph_cap_snap **pcapsnap)
{
struct inode *inode = &ci->netfs.inode;
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_snap_context *old_snapc, *new_snapc;
struct ceph_cap_snap *capsnap = *pcapsnap;
struct ceph_buffer *old_blob = NULL;
@ -548,14 +557,14 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
as no new writes are allowed to start when pending, so any
writes in progress now were started before the previous
cap_snap. lucky us. */
dout("%s %p %llx.%llx already pending\n",
__func__, inode, ceph_vinop(inode));
doutc(cl, "%p %llx.%llx already pending\n", inode,
ceph_vinop(inode));
goto update_snapc;
}
if (ci->i_wrbuffer_ref_head == 0 &&
!(dirty & (CEPH_CAP_ANY_EXCL|CEPH_CAP_FILE_WR))) {
dout("%s %p %llx.%llx nothing dirty|writing\n",
__func__, inode, ceph_vinop(inode));
doutc(cl, "%p %llx.%llx nothing dirty|writing\n", inode,
ceph_vinop(inode));
goto update_snapc;
}
@ -575,15 +584,15 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
} else {
if (!(used & CEPH_CAP_FILE_WR) &&
ci->i_wrbuffer_ref_head == 0) {
dout("%s %p %llx.%llx no new_snap|dirty_page|writing\n",
__func__, inode, ceph_vinop(inode));
doutc(cl, "%p %llx.%llx no new_snap|dirty_page|writing\n",
inode, ceph_vinop(inode));
goto update_snapc;
}
}
dout("%s %p %llx.%llx cap_snap %p queuing under %p %s %s\n",
__func__, inode, ceph_vinop(inode), capsnap, old_snapc,
ceph_cap_string(dirty), capsnap->need_flush ? "" : "no_flush");
doutc(cl, "%p %llx.%llx cap_snap %p queuing under %p %s %s\n",
inode, ceph_vinop(inode), capsnap, old_snapc,
ceph_cap_string(dirty), capsnap->need_flush ? "" : "no_flush");
ihold(inode);
capsnap->follows = old_snapc->seq;
@ -615,9 +624,9 @@ static void ceph_queue_cap_snap(struct ceph_inode_info *ci,
list_add_tail(&capsnap->ci_item, &ci->i_cap_snaps);
if (used & CEPH_CAP_FILE_WR) {
dout("%s %p %llx.%llx cap_snap %p snapc %p seq %llu used WR,"
" now pending\n", __func__, inode, ceph_vinop(inode),
capsnap, old_snapc, old_snapc->seq);
doutc(cl, "%p %llx.%llx cap_snap %p snapc %p seq %llu used WR,"
" now pending\n", inode, ceph_vinop(inode), capsnap,
old_snapc, old_snapc->seq);
capsnap->writing = 1;
} else {
/* note mtime, size NOW. */
@ -634,7 +643,7 @@ update_snapc:
ci->i_head_snapc = NULL;
} else {
ci->i_head_snapc = ceph_get_snap_context(new_snapc);
dout(" new snapc is %p\n", new_snapc);
doutc(cl, " new snapc is %p\n", new_snapc);
}
spin_unlock(&ci->i_ceph_lock);
@ -655,6 +664,7 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
{
struct inode *inode = &ci->netfs.inode;
struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
struct ceph_client *cl = mdsc->fsc->client;
BUG_ON(capsnap->writing);
capsnap->size = i_size_read(inode);
@ -667,11 +677,12 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
capsnap->truncate_size = ci->i_truncate_size;
capsnap->truncate_seq = ci->i_truncate_seq;
if (capsnap->dirty_pages) {
dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu "
"still has %d dirty pages\n", __func__, inode,
ceph_vinop(inode), capsnap, capsnap->context,
capsnap->context->seq, ceph_cap_string(capsnap->dirty),
capsnap->size, capsnap->dirty_pages);
doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s "
"s=%llu still has %d dirty pages\n", inode,
ceph_vinop(inode), capsnap, capsnap->context,
capsnap->context->seq,
ceph_cap_string(capsnap->dirty),
capsnap->size, capsnap->dirty_pages);
return 0;
}
@ -680,20 +691,20 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
* And trigger to flush the buffer immediately.
*/
if (ci->i_wrbuffer_ref) {
dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu "
"used WRBUFFER, delaying\n", __func__, inode,
ceph_vinop(inode), capsnap, capsnap->context,
capsnap->context->seq, ceph_cap_string(capsnap->dirty),
capsnap->size);
doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s "
"s=%llu used WRBUFFER, delaying\n", inode,
ceph_vinop(inode), capsnap, capsnap->context,
capsnap->context->seq, ceph_cap_string(capsnap->dirty),
capsnap->size);
ceph_queue_writeback(inode);
return 0;
}
ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
dout("%s %p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu\n",
__func__, inode, ceph_vinop(inode), capsnap, capsnap->context,
capsnap->context->seq, ceph_cap_string(capsnap->dirty),
capsnap->size);
doutc(cl, "%p %llx.%llx cap_snap %p snapc %p %llu %s s=%llu\n",
inode, ceph_vinop(inode), capsnap, capsnap->context,
capsnap->context->seq, ceph_cap_string(capsnap->dirty),
capsnap->size);
spin_lock(&mdsc->snap_flush_lock);
if (list_empty(&ci->i_snap_flush_item)) {
@ -708,13 +719,15 @@ int __ceph_finish_cap_snap(struct ceph_inode_info *ci,
* Queue cap_snaps for snap writeback for this realm and its children.
* Called under snap_rwsem, so realm topology won't change.
*/
static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
static void queue_realm_cap_snaps(struct ceph_mds_client *mdsc,
struct ceph_snap_realm *realm)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct inode *lastinode = NULL;
struct ceph_cap_snap *capsnap = NULL;
dout("%s %p %llx inode\n", __func__, realm, realm->ino);
doutc(cl, "%p %llx inode\n", realm, realm->ino);
spin_lock(&realm->inodes_with_caps_lock);
list_for_each_entry(ci, &realm->inodes_with_caps, i_snap_realm_item) {
@ -733,8 +746,9 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
if (!capsnap) {
capsnap = kmem_cache_zalloc(ceph_cap_snap_cachep, GFP_NOFS);
if (!capsnap) {
pr_err("ENOMEM allocating ceph_cap_snap on %p\n",
inode);
pr_err_client(cl,
"ENOMEM allocating ceph_cap_snap on %p\n",
inode);
return;
}
}
@ -752,7 +766,7 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm)
if (capsnap)
kmem_cache_free(ceph_cap_snap_cachep, capsnap);
dout("%s %p %llx done\n", __func__, realm, realm->ino);
doutc(cl, "%p %llx done\n", realm, realm->ino);
}
/*
@ -766,6 +780,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
void *p, void *e, bool deletion,
struct ceph_snap_realm **realm_ret)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_mds_snap_realm *ri; /* encoded */
__le64 *snaps; /* encoded */
__le64 *prior_parent_snaps; /* encoded */
@ -780,7 +795,7 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
lockdep_assert_held_write(&mdsc->snap_rwsem);
dout("%s deletion=%d\n", __func__, deletion);
doutc(cl, "deletion=%d\n", deletion);
more:
realm = NULL;
rebuild_snapcs = 0;
@ -810,8 +825,8 @@ more:
rebuild_snapcs += err;
if (le64_to_cpu(ri->seq) > realm->seq) {
dout("%s updating %llx %p %lld -> %lld\n", __func__,
realm->ino, realm, realm->seq, le64_to_cpu(ri->seq));
doutc(cl, "updating %llx %p %lld -> %lld\n", realm->ino,
realm, realm->seq, le64_to_cpu(ri->seq));
/* update realm parameters, snap lists */
realm->seq = le64_to_cpu(ri->seq);
realm->created = le64_to_cpu(ri->created);
@ -834,16 +849,16 @@ more:
rebuild_snapcs = 1;
} else if (!realm->cached_context) {
dout("%s %llx %p seq %lld new\n", __func__,
realm->ino, realm, realm->seq);
doutc(cl, "%llx %p seq %lld new\n", realm->ino, realm,
realm->seq);
rebuild_snapcs = 1;
} else {
dout("%s %llx %p seq %lld unchanged\n", __func__,
realm->ino, realm, realm->seq);
doutc(cl, "%llx %p seq %lld unchanged\n", realm->ino, realm,
realm->seq);
}
dout("done with %llx %p, rebuild_snapcs=%d, %p %p\n", realm->ino,
realm, rebuild_snapcs, p, e);
doutc(cl, "done with %llx %p, rebuild_snapcs=%d, %p %p\n", realm->ino,
realm, rebuild_snapcs, p, e);
/*
* this will always track the uppest parent realm from which
@ -855,7 +870,7 @@ more:
/* rebuild_snapcs when we reach the _end_ (root) of the trace */
if (realm_to_rebuild && p >= e)
rebuild_snap_realms(realm_to_rebuild, &dirty_realms);
rebuild_snap_realms(mdsc, realm_to_rebuild, &dirty_realms);
if (!first_realm)
first_realm = realm;
@ -873,7 +888,7 @@ more:
realm = list_first_entry(&dirty_realms, struct ceph_snap_realm,
dirty_item);
list_del_init(&realm->dirty_item);
queue_realm_cap_snaps(realm);
queue_realm_cap_snaps(mdsc, realm);
}
if (realm_ret)
@ -891,7 +906,7 @@ fail:
ceph_put_snap_realm(mdsc, realm);
if (first_realm)
ceph_put_snap_realm(mdsc, first_realm);
pr_err("%s error %d\n", __func__, err);
pr_err_client(cl, "error %d\n", err);
/*
* When receiving a corrupted snap trace we don't know what
@ -905,11 +920,12 @@ fail:
WRITE_ONCE(mdsc->fsc->mount_state, CEPH_MOUNT_FENCE_IO);
ret = ceph_monc_blocklist_add(&client->monc, &client->msgr.inst.addr);
if (ret)
pr_err("%s failed to blocklist %s: %d\n", __func__,
ceph_pr_addr(&client->msgr.inst.addr), ret);
pr_err_client(cl, "failed to blocklist %s: %d\n",
ceph_pr_addr(&client->msgr.inst.addr), ret);
WARN(1, "%s: %s%sdo remount to continue%s",
__func__, ret ? "" : ceph_pr_addr(&client->msgr.inst.addr),
WARN(1, "[client.%lld] %s %s%sdo remount to continue%s",
client->monc.auth->global_id, __func__,
ret ? "" : ceph_pr_addr(&client->msgr.inst.addr),
ret ? "" : " was blocklisted, ",
err == -EIO ? " after corrupted snaptrace is fixed" : "");
@ -925,11 +941,12 @@ fail:
*/
static void flush_snaps(struct ceph_mds_client *mdsc)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_inode_info *ci;
struct inode *inode;
struct ceph_mds_session *session = NULL;
dout("%s\n", __func__);
doutc(cl, "begin\n");
spin_lock(&mdsc->snap_flush_lock);
while (!list_empty(&mdsc->snap_flush_list)) {
ci = list_first_entry(&mdsc->snap_flush_list,
@ -944,7 +961,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
spin_unlock(&mdsc->snap_flush_lock);
ceph_put_mds_session(session);
dout("%s done\n", __func__);
doutc(cl, "done\n");
}
/**
@ -960,7 +977,7 @@ static void flush_snaps(struct ceph_mds_client *mdsc)
void ceph_change_snap_realm(struct inode *inode, struct ceph_snap_realm *realm)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
struct ceph_mds_client *mdsc = ceph_inode_to_fs_client(inode)->mdsc;
struct ceph_snap_realm *oldrealm = ci->i_snap_realm;
lockdep_assert_held(&ci->i_ceph_lock);
@ -1000,6 +1017,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,
struct ceph_msg *msg)
{
struct ceph_client *cl = mdsc->fsc->client;
struct super_block *sb = mdsc->fsc->sb;
int mds = session->s_mds;
u64 split;
@ -1030,8 +1048,8 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
trace_len = le32_to_cpu(h->trace_len);
p += sizeof(*h);
dout("%s from mds%d op %s split %llx tracelen %d\n", __func__,
mds, ceph_snap_op_name(op), split, trace_len);
doutc(cl, "from mds%d op %s split %llx tracelen %d\n", mds,
ceph_snap_op_name(op), split, trace_len);
down_write(&mdsc->snap_rwsem);
locked_rwsem = 1;
@ -1062,7 +1080,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
goto out;
}
dout("splitting snap_realm %llx %p\n", realm->ino, realm);
doutc(cl, "splitting snap_realm %llx %p\n", realm->ino, realm);
for (i = 0; i < num_split_inos; i++) {
struct ceph_vino vino = {
.ino = le64_to_cpu(split_inos[i]),
@ -1087,13 +1105,13 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
*/
if (ci->i_snap_realm->created >
le64_to_cpu(ri->created)) {
dout(" leaving %p %llx.%llx in newer realm %llx %p\n",
inode, ceph_vinop(inode), ci->i_snap_realm->ino,
ci->i_snap_realm);
doutc(cl, " leaving %p %llx.%llx in newer realm %llx %p\n",
inode, ceph_vinop(inode), ci->i_snap_realm->ino,
ci->i_snap_realm);
goto skip_inode;
}
dout(" will move %p %llx.%llx to split realm %llx %p\n",
inode, ceph_vinop(inode), realm->ino, realm);
doutc(cl, " will move %p %llx.%llx to split realm %llx %p\n",
inode, ceph_vinop(inode), realm->ino, realm);
ceph_get_snap_realm(mdsc, realm);
ceph_change_snap_realm(inode, realm);
@ -1154,7 +1172,7 @@ skip_inode:
return;
bad:
pr_err("%s corrupt snap message from mds%d\n", __func__, mds);
pr_err_client(cl, "corrupt snap message from mds%d\n", mds);
ceph_msg_dump(msg);
out:
if (locked_rwsem)
@ -1170,6 +1188,7 @@ out:
struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
u64 snap)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snapid_map *sm, *exist;
struct rb_node **p, *parent;
int ret;
@ -1192,8 +1211,8 @@ struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
}
spin_unlock(&mdsc->snapid_map_lock);
if (exist) {
dout("%s found snapid map %llx -> %x\n", __func__,
exist->snap, exist->dev);
doutc(cl, "found snapid map %llx -> %x\n", exist->snap,
exist->dev);
return exist;
}
@ -1237,13 +1256,12 @@ struct ceph_snapid_map* ceph_get_snapid_map(struct ceph_mds_client *mdsc,
if (exist) {
free_anon_bdev(sm->dev);
kfree(sm);
dout("%s found snapid map %llx -> %x\n", __func__,
exist->snap, exist->dev);
doutc(cl, "found snapid map %llx -> %x\n", exist->snap,
exist->dev);
return exist;
}
dout("%s create snapid map %llx -> %x\n", __func__,
sm->snap, sm->dev);
doutc(cl, "create snapid map %llx -> %x\n", sm->snap, sm->dev);
return sm;
}
@ -1268,6 +1286,7 @@ void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snapid_map *sm;
unsigned long now;
LIST_HEAD(to_free);
@ -1289,7 +1308,7 @@ void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
while (!list_empty(&to_free)) {
sm = list_first_entry(&to_free, struct ceph_snapid_map, lru);
list_del(&sm->lru);
dout("trim snapid map %llx -> %x\n", sm->snap, sm->dev);
doutc(cl, "trim snapid map %llx -> %x\n", sm->snap, sm->dev);
free_anon_bdev(sm->dev);
kfree(sm);
}
@ -1297,6 +1316,7 @@ void ceph_trim_snapid_map(struct ceph_mds_client *mdsc)
void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
{
struct ceph_client *cl = mdsc->fsc->client;
struct ceph_snapid_map *sm;
struct rb_node *p;
LIST_HEAD(to_free);
@ -1315,8 +1335,8 @@ void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc)
list_del(&sm->lru);
free_anon_bdev(sm->dev);
if (WARN_ON_ONCE(atomic_read(&sm->ref))) {
pr_err("snapid map %llx -> %x still in use\n",
sm->snap, sm->dev);
pr_err_client(cl, "snapid map %llx -> %x still in use\n",
sm->snap, sm->dev);
}
kfree(sm);
}

View File

@ -44,28 +44,29 @@ static LIST_HEAD(ceph_fsc_list);
*/
static void ceph_put_super(struct super_block *s)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(s);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(s);
dout("put_super\n");
doutc(fsc->client, "begin\n");
ceph_fscrypt_free_dummy_policy(fsc);
ceph_mdsc_close_sessions(fsc->mdsc);
doutc(fsc->client, "done\n");
}
static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(d_inode(dentry));
struct ceph_mon_client *monc = &fsc->client->monc;
struct ceph_statfs st;
int i, err;
u64 data_pool;
doutc(fsc->client, "begin\n");
if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
data_pool = fsc->mdsc->mdsmap->m_data_pg_pools[0];
} else {
data_pool = CEPH_NOPOOL;
}
dout("statfs\n");
err = ceph_monc_do_statfs(monc, data_pool, &st);
if (err < 0)
return err;
@ -113,24 +114,26 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
/* fold the fs_cluster_id into the upper bits */
buf->f_fsid.val[1] = monc->fs_cluster_id;
doutc(fsc->client, "done\n");
return 0;
}
static int ceph_sync_fs(struct super_block *sb, int wait)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
struct ceph_client *cl = fsc->client;
if (!wait) {
dout("sync_fs (non-blocking)\n");
doutc(cl, "(non-blocking)\n");
ceph_flush_dirty_caps(fsc->mdsc);
dout("sync_fs (non-blocking) done\n");
doutc(cl, "(non-blocking) done\n");
return 0;
}
dout("sync_fs (blocking)\n");
doutc(cl, "(blocking)\n");
ceph_osdc_sync(&fsc->client->osdc);
ceph_mdsc_sync(fsc->mdsc);
dout("sync_fs (blocking) done\n");
doutc(cl, "(blocking) done\n");
return 0;
}
@ -341,7 +344,7 @@ static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
char *dev_name = param->string, *dev_name_end;
int ret;
dout("%s '%s'\n", __func__, dev_name);
dout("'%s'\n", dev_name);
if (!dev_name || !*dev_name)
return invalfc(fc, "Empty source");
@ -413,7 +416,7 @@ static int ceph_parse_mount_param(struct fs_context *fc,
return ret;
token = fs_parse(fc, ceph_mount_parameters, param, &result);
dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
dout("%s: fs_parse '%s' token %d\n",__func__, param->key, token);
if (token < 0)
return token;
@ -684,7 +687,7 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
*/
static int ceph_show_options(struct seq_file *m, struct dentry *root)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(root->d_sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(root->d_sb);
struct ceph_mount_options *fsopt = fsc->mount_options;
size_t pos;
int ret;
@ -881,7 +884,7 @@ static void flush_fs_workqueues(struct ceph_fs_client *fsc)
static void destroy_fs_client(struct ceph_fs_client *fsc)
{
dout("destroy_fs_client %p\n", fsc);
doutc(fsc->client, "%p\n", fsc);
spin_lock(&ceph_fsc_lock);
list_del(&fsc->metric_wakeup);
@ -896,7 +899,7 @@ static void destroy_fs_client(struct ceph_fs_client *fsc)
ceph_destroy_client(fsc->client);
kfree(fsc);
dout("destroy_fs_client %p done\n", fsc);
dout("%s: %p done\n", __func__, fsc);
}
/*
@ -1015,9 +1018,9 @@ static void __ceph_umount_begin(struct ceph_fs_client *fsc)
*/
void ceph_umount_begin(struct super_block *sb)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
dout("ceph_umount_begin - starting forced umount\n");
doutc(fsc->client, "starting forced umount\n");
if (!fsc)
return;
fsc->mount_state = CEPH_MOUNT_SHUTDOWN;
@ -1045,13 +1048,14 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
const char *path,
unsigned long started)
{
struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req = NULL;
int err;
struct dentry *root;
/* open dir */
dout("open_root_inode opening '%s'\n", path);
doutc(cl, "opening '%s'\n", path);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_GETATTR, USE_ANY_MDS);
if (IS_ERR(req))
return ERR_CAST(req);
@ -1071,13 +1075,13 @@ static struct dentry *open_root_dentry(struct ceph_fs_client *fsc,
if (err == 0) {
struct inode *inode = req->r_target_inode;
req->r_target_inode = NULL;
dout("open_root_inode success\n");
doutc(cl, "success\n");
root = d_make_root(inode);
if (!root) {
root = ERR_PTR(-ENOMEM);
goto out;
}
dout("open_root_inode success, root dentry is %p\n", root);
doutc(cl, "success, root dentry is %p\n", root);
} else {
root = ERR_PTR(err);
}
@ -1136,11 +1140,12 @@ static int ceph_apply_test_dummy_encryption(struct super_block *sb,
static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
struct fs_context *fc)
{
struct ceph_client *cl = fsc->client;
int err;
unsigned long started = jiffies; /* note the start time */
struct dentry *root;
dout("mount start %p\n", fsc);
doutc(cl, "mount start %p\n", fsc);
mutex_lock(&fsc->client->mount_mutex);
if (!fsc->sb->s_root) {
@ -1163,7 +1168,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
if (err)
goto out;
dout("mount opening path '%s'\n", path);
doutc(cl, "mount opening path '%s'\n", path);
ceph_fs_debugfs_init(fsc);
@ -1178,7 +1183,7 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
}
fsc->mount_state = CEPH_MOUNT_MOUNTED;
dout("mount success\n");
doutc(cl, "mount success\n");
mutex_unlock(&fsc->client->mount_mutex);
return root;
@ -1191,9 +1196,10 @@ out:
static int ceph_set_super(struct super_block *s, struct fs_context *fc)
{
struct ceph_fs_client *fsc = s->s_fs_info;
struct ceph_client *cl = fsc->client;
int ret;
dout("set_super %p\n", s);
doutc(cl, "%p\n", s);
s->s_maxbytes = MAX_LFS_FILESIZE;
@ -1226,31 +1232,32 @@ static int ceph_compare_super(struct super_block *sb, struct fs_context *fc)
struct ceph_fs_client *new = fc->s_fs_info;
struct ceph_mount_options *fsopt = new->mount_options;
struct ceph_options *opt = new->client->options;
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
struct ceph_client *cl = fsc->client;
dout("ceph_compare_super %p\n", sb);
doutc(cl, "%p\n", sb);
if (compare_mount_options(fsopt, opt, fsc)) {
dout("monitor(s)/mount options don't match\n");
doutc(cl, "monitor(s)/mount options don't match\n");
return 0;
}
if ((opt->flags & CEPH_OPT_FSID) &&
ceph_fsid_compare(&opt->fsid, &fsc->client->fsid)) {
dout("fsid doesn't match\n");
doutc(cl, "fsid doesn't match\n");
return 0;
}
if (fc->sb_flags != (sb->s_flags & ~SB_BORN)) {
dout("flags differ\n");
doutc(cl, "flags differ\n");
return 0;
}
if (fsc->blocklisted && !ceph_test_mount_opt(fsc, CLEANRECOVER)) {
dout("client is blocklisted (and CLEANRECOVER is not set)\n");
doutc(cl, "client is blocklisted (and CLEANRECOVER is not set)\n");
return 0;
}
if (fsc->mount_state == CEPH_MOUNT_SHUTDOWN) {
dout("client has been forcibly unmounted\n");
doutc(cl, "client has been forcibly unmounted\n");
return 0;
}
@ -1322,9 +1329,9 @@ static int ceph_get_tree(struct fs_context *fc)
goto out;
}
if (ceph_sb_to_client(sb) != fsc) {
if (ceph_sb_to_fs_client(sb) != fsc) {
destroy_fs_client(fsc);
fsc = ceph_sb_to_client(sb);
fsc = ceph_sb_to_fs_client(sb);
dout("get_sb got existing client %p\n", fsc);
} else {
dout("get_sb using new client %p\n", fsc);
@ -1338,8 +1345,9 @@ static int ceph_get_tree(struct fs_context *fc)
err = PTR_ERR(res);
goto out_splat;
}
dout("root %p inode %p ino %llx.%llx\n", res,
d_inode(res), ceph_vinop(d_inode(res)));
doutc(fsc->client, "root %p inode %p ino %llx.%llx\n", res,
d_inode(res), ceph_vinop(d_inode(res)));
fc->root = fsc->sb->s_root;
return 0;
@ -1377,7 +1385,7 @@ static int ceph_reconfigure_fc(struct fs_context *fc)
struct ceph_parse_opts_ctx *pctx = fc->fs_private;
struct ceph_mount_options *fsopt = pctx->opts;
struct super_block *sb = fc->root->d_sb;
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
err = ceph_apply_test_dummy_encryption(sb, fc, fsopt);
if (err)
@ -1397,7 +1405,8 @@ static int ceph_reconfigure_fc(struct fs_context *fc)
kfree(fsc->mount_options->mon_addr);
fsc->mount_options->mon_addr = fsopt->mon_addr;
fsopt->mon_addr = NULL;
pr_notice("ceph: monitor addresses recorded, but not used for reconnection");
pr_notice_client(fsc->client,
"monitor addresses recorded, but not used for reconnection");
}
sync_filesystem(sb);
@ -1516,11 +1525,12 @@ void ceph_dec_osd_stopping_blocker(struct ceph_mds_client *mdsc)
static void ceph_kill_sb(struct super_block *s)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(s);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(s);
struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
bool wait;
dout("kill_sb %p\n", s);
doutc(cl, "%p\n", s);
ceph_mdsc_pre_umount(mdsc);
flush_fs_workqueues(fsc);
@ -1551,9 +1561,9 @@ static void ceph_kill_sb(struct super_block *s)
&mdsc->stopping_waiter,
fsc->client->options->mount_timeout);
if (!timeleft) /* timed out */
pr_warn("umount timed out, %ld\n", timeleft);
pr_warn_client(cl, "umount timed out, %ld\n", timeleft);
else if (timeleft < 0) /* killed */
pr_warn("umount was killed, %ld\n", timeleft);
pr_warn_client(cl, "umount was killed, %ld\n", timeleft);
}
mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED;
@ -1572,13 +1582,13 @@ static struct file_system_type ceph_fs_type = {
.name = "ceph",
.init_fs_context = ceph_init_fs_context,
.kill_sb = ceph_kill_sb,
.fs_flags = FS_RENAME_DOES_D_MOVE,
.fs_flags = FS_RENAME_DOES_D_MOVE | FS_ALLOW_IDMAP,
};
MODULE_ALIAS_FS("ceph");
int ceph_force_reconnect(struct super_block *sb)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(sb);
int err = 0;
fsc->mount_state = CEPH_MOUNT_RECOVER;
@ -1671,6 +1681,11 @@ static const struct kernel_param_ops param_ops_mount_syntax = {
module_param_cb(mount_syntax_v1, &param_ops_mount_syntax, &mount_support, 0444);
module_param_cb(mount_syntax_v2, &param_ops_mount_syntax, &mount_support, 0444);
bool enable_unsafe_idmap = false;
module_param(enable_unsafe_idmap, bool, 0644);
MODULE_PARM_DESC(enable_unsafe_idmap,
"Allow to use idmapped mounts with MDS without CEPHFS_FEATURE_HAS_OWNER_UIDGID");
module_init(init_ceph);
module_exit(exit_ceph);

View File

@ -488,13 +488,13 @@ ceph_inode(const struct inode *inode)
}
static inline struct ceph_fs_client *
ceph_inode_to_client(const struct inode *inode)
ceph_inode_to_fs_client(const struct inode *inode)
{
return (struct ceph_fs_client *)inode->i_sb->s_fs_info;
}
static inline struct ceph_fs_client *
ceph_sb_to_client(const struct super_block *sb)
ceph_sb_to_fs_client(const struct super_block *sb)
{
return (struct ceph_fs_client *)sb->s_fs_info;
}
@ -502,7 +502,13 @@ ceph_sb_to_client(const struct super_block *sb)
static inline struct ceph_mds_client *
ceph_sb_to_mdsc(const struct super_block *sb)
{
return (struct ceph_mds_client *)ceph_sb_to_client(sb)->mdsc;
return (struct ceph_mds_client *)ceph_sb_to_fs_client(sb)->mdsc;
}
static inline struct ceph_client *
ceph_inode_to_client(const struct inode *inode)
{
return (struct ceph_client *)ceph_inode_to_fs_client(inode)->client;
}
static inline struct ceph_vino
@ -558,7 +564,7 @@ static inline u64 ceph_snap(struct inode *inode)
*/
static inline u64 ceph_present_ino(struct super_block *sb, u64 ino)
{
if (unlikely(ceph_test_mount_opt(ceph_sb_to_client(sb), INO32)))
if (unlikely(ceph_test_mount_opt(ceph_sb_to_fs_client(sb), INO32)))
return ceph_ino_to_ino32(ino);
return ino;
}
@ -1094,8 +1100,8 @@ struct ceph_iattr {
struct ceph_fscrypt_auth *fscrypt_auth;
};
extern int __ceph_setattr(struct inode *inode, struct iattr *attr,
struct ceph_iattr *cia);
extern int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
struct iattr *attr, struct ceph_iattr *cia);
extern int ceph_setattr(struct mnt_idmap *idmap,
struct dentry *dentry, struct iattr *attr);
extern int ceph_getattr(struct mnt_idmap *idmap,
@ -1106,7 +1112,7 @@ void ceph_inode_shutdown(struct inode *inode);
static inline bool ceph_inode_is_shutdown(struct inode *inode)
{
unsigned long flags = READ_ONCE(ceph_inode(inode)->i_ceph_flags);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);
int state = READ_ONCE(fsc->mount_state);
return (flags & CEPH_I_SHUTDOWN) || state >= CEPH_MOUNT_SHUTDOWN;
@ -1223,7 +1229,8 @@ extern void ceph_add_cap(struct inode *inode,
unsigned cap, unsigned seq, u64 realmino, int flags,
struct ceph_cap **new_cap);
extern void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
extern void ceph_remove_cap(struct ceph_cap *cap, bool queue_release);
extern void ceph_remove_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
bool queue_release);
extern void __ceph_remove_caps(struct ceph_inode_info *ci);
extern void ceph_put_cap(struct ceph_mds_client *mdsc,
struct ceph_cap *cap);
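
An illustrative sketch (not from the patch) of the renamed accessors: ceph_inode_to_fs_client() is the old ceph_inode_to_client(), while ceph_inode_to_client() now returns the lower-level libceph client that the new logging macros take. The function name below is hypothetical.

#include <linux/ceph/ceph_debug.h>
#include "super.h"	/* fs/ceph-internal header declaring the accessors */

/* Hypothetical helper showing which client type each accessor yields. */
static void example_accessors(struct inode *inode)
{
	struct ceph_fs_client *fsc = ceph_inode_to_fs_client(inode);	/* CephFS-level client */
	struct ceph_client *cl = ceph_inode_to_client(inode);		/* libceph client, for doutc() */

	doutc(cl, "%p %llx.%llx client%lld\n", inode, ceph_vinop(inode),
	      ceph_client_gid(fsc->client));
}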

View File

@ -57,7 +57,8 @@ static bool ceph_vxattrcb_layout_exists(struct ceph_inode_info *ci)
static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
size_t size)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
struct ceph_client *cl = fsc->client;
struct ceph_osd_client *osdc = &fsc->client->osdc;
struct ceph_string *pool_ns;
s64 pool = ci->i_layout.pool_id;
@ -69,7 +70,7 @@ static ssize_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
pool_ns = ceph_try_get_string(ci->i_layout.pool_ns);
dout("ceph_vxattrcb_layout %p\n", &ci->netfs.inode);
doutc(cl, "%p\n", &ci->netfs.inode);
down_read(&osdc->lock);
pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, pool);
if (pool_name) {
@ -161,7 +162,7 @@ static ssize_t ceph_vxattrcb_layout_pool(struct ceph_inode_info *ci,
char *val, size_t size)
{
ssize_t ret;
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
struct ceph_osd_client *osdc = &fsc->client->osdc;
s64 pool = ci->i_layout.pool_id;
const char *pool_name;
@ -313,7 +314,7 @@ static ssize_t ceph_vxattrcb_snap_btime(struct ceph_inode_info *ci, char *val,
static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
char *val, size_t size)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
return ceph_fmt_xattr(val, size, "%pU", &fsc->client->fsid);
}
@ -321,7 +322,7 @@ static ssize_t ceph_vxattrcb_cluster_fsid(struct ceph_inode_info *ci,
static ssize_t ceph_vxattrcb_client_id(struct ceph_inode_info *ci,
char *val, size_t size)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(ci->netfs.inode.i_sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(ci->netfs.inode.i_sb);
return ceph_fmt_xattr(val, size, "client%lld",
ceph_client_gid(fsc->client));
@ -570,6 +571,8 @@ static int __set_xattr(struct ceph_inode_info *ci,
int flags, int update_xattr,
struct ceph_inode_xattr **newxattr)
{
struct inode *inode = &ci->netfs.inode;
struct ceph_client *cl = ceph_inode_to_client(inode);
struct rb_node **p;
struct rb_node *parent = NULL;
struct ceph_inode_xattr *xattr = NULL;
@ -626,7 +629,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
xattr->should_free_name = update_xattr;
ci->i_xattrs.count++;
dout("%s count=%d\n", __func__, ci->i_xattrs.count);
doutc(cl, "count=%d\n", ci->i_xattrs.count);
} else {
kfree(*newxattr);
*newxattr = NULL;
@ -654,13 +657,13 @@ static int __set_xattr(struct ceph_inode_info *ci,
if (new) {
rb_link_node(&xattr->node, parent, p);
rb_insert_color(&xattr->node, &ci->i_xattrs.index);
dout("%s p=%p\n", __func__, p);
doutc(cl, "p=%p\n", p);
}
dout("%s added %llx.%llx xattr %p %.*s=%.*s%s\n", __func__,
ceph_vinop(&ci->netfs.inode), xattr, name_len, name,
min(val_len, MAX_XATTR_VAL_PRINT_LEN), val,
val_len > MAX_XATTR_VAL_PRINT_LEN ? "..." : "");
doutc(cl, "added %p %llx.%llx xattr %p %.*s=%.*s%s\n", inode,
ceph_vinop(inode), xattr, name_len, name, min(val_len,
MAX_XATTR_VAL_PRINT_LEN), val,
val_len > MAX_XATTR_VAL_PRINT_LEN ? "..." : "");
return 0;
}
@ -668,6 +671,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
const char *name)
{
struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
struct rb_node **p;
struct rb_node *parent = NULL;
struct ceph_inode_xattr *xattr = NULL;
@ -688,13 +692,13 @@ static struct ceph_inode_xattr *__get_xattr(struct ceph_inode_info *ci,
else {
int len = min(xattr->val_len, MAX_XATTR_VAL_PRINT_LEN);
dout("%s %s: found %.*s%s\n", __func__, name, len,
xattr->val, xattr->val_len > len ? "..." : "");
doutc(cl, "%s found %.*s%s\n", name, len, xattr->val,
xattr->val_len > len ? "..." : "");
return xattr;
}
}
dout("%s %s: not found\n", __func__, name);
doutc(cl, "%s not found\n", name);
return NULL;
}
@ -735,19 +739,20 @@ static int __remove_xattr(struct ceph_inode_info *ci,
static char *__copy_xattr_names(struct ceph_inode_info *ci,
char *dest)
{
struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
struct rb_node *p;
struct ceph_inode_xattr *xattr = NULL;
p = rb_first(&ci->i_xattrs.index);
dout("__copy_xattr_names count=%d\n", ci->i_xattrs.count);
doutc(cl, "count=%d\n", ci->i_xattrs.count);
while (p) {
xattr = rb_entry(p, struct ceph_inode_xattr, node);
memcpy(dest, xattr->name, xattr->name_len);
dest[xattr->name_len] = '\0';
dout("dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
xattr->name_len, ci->i_xattrs.names_size);
doutc(cl, "dest=%s %p (%s) (%d/%d)\n", dest, xattr, xattr->name,
xattr->name_len, ci->i_xattrs.names_size);
dest += xattr->name_len + 1;
p = rb_next(p);
@ -758,19 +763,19 @@ static char *__copy_xattr_names(struct ceph_inode_info *ci,
void __ceph_destroy_xattrs(struct ceph_inode_info *ci)
{
struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
struct rb_node *p, *tmp;
struct ceph_inode_xattr *xattr = NULL;
p = rb_first(&ci->i_xattrs.index);
dout("__ceph_destroy_xattrs p=%p\n", p);
doutc(cl, "p=%p\n", p);
while (p) {
xattr = rb_entry(p, struct ceph_inode_xattr, node);
tmp = p;
p = rb_next(tmp);
dout("__ceph_destroy_xattrs next p=%p (%.*s)\n", p,
xattr->name_len, xattr->name);
doutc(cl, "next p=%p (%.*s)\n", p, xattr->name_len, xattr->name);
rb_erase(tmp, &ci->i_xattrs.index);
__free_xattr(xattr);
@ -787,6 +792,7 @@ static int __build_xattrs(struct inode *inode)
__releases(ci->i_ceph_lock)
__acquires(ci->i_ceph_lock)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
u32 namelen;
u32 numattr = 0;
void *p, *end;
@ -798,8 +804,8 @@ static int __build_xattrs(struct inode *inode)
int err = 0;
int i;
dout("__build_xattrs() len=%d\n",
ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
doutc(cl, "len=%d\n",
ci->i_xattrs.blob ? (int)ci->i_xattrs.blob->vec.iov_len : 0);
if (ci->i_xattrs.index_version >= ci->i_xattrs.version)
return 0; /* already built */
@ -874,6 +880,8 @@ bad:
static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
int val_size)
{
struct ceph_client *cl = ceph_inode_to_client(&ci->netfs.inode);
/*
* 4 bytes for the length, and additional 4 bytes per each xattr name,
* 4 bytes per each value
@ -881,9 +889,8 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
int size = 4 + ci->i_xattrs.count*(4 + 4) +
ci->i_xattrs.names_size +
ci->i_xattrs.vals_size;
dout("__get_required_blob_size c=%d names.size=%d vals.size=%d\n",
ci->i_xattrs.count, ci->i_xattrs.names_size,
ci->i_xattrs.vals_size);
doutc(cl, "c=%d names.size=%d vals.size=%d\n", ci->i_xattrs.count,
ci->i_xattrs.names_size, ci->i_xattrs.vals_size);
if (name_size)
size += 4 + 4 + name_size + val_size;
@ -899,12 +906,14 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
*/
struct ceph_buffer *__ceph_build_xattrs_blob(struct ceph_inode_info *ci)
{
struct inode *inode = &ci->netfs.inode;
struct ceph_client *cl = ceph_inode_to_client(inode);
struct rb_node *p;
struct ceph_inode_xattr *xattr = NULL;
struct ceph_buffer *old_blob = NULL;
void *dest;
dout("__build_xattrs_blob %p\n", &ci->netfs.inode);
doutc(cl, "%p %llx.%llx\n", inode, ceph_vinop(inode));
if (ci->i_xattrs.dirty) {
int need = __get_required_blob_size(ci, 0, 0);
@ -962,6 +971,7 @@ static inline int __get_request_mask(struct inode *in) {
ssize_t __ceph_getxattr(struct inode *inode, const char *name, void *value,
size_t size)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_inode_xattr *xattr;
struct ceph_vxattr *vxattr;
@ -1000,8 +1010,9 @@ handle_non_vxattrs:
req_mask = __get_request_mask(inode);
spin_lock(&ci->i_ceph_lock);
dout("getxattr %p name '%s' ver=%lld index_ver=%lld\n", inode, name,
ci->i_xattrs.version, ci->i_xattrs.index_version);
doutc(cl, "%p %llx.%llx name '%s' ver=%lld index_ver=%lld\n", inode,
ceph_vinop(inode), name, ci->i_xattrs.version,
ci->i_xattrs.index_version);
if (ci->i_xattrs.version == 0 ||
!((req_mask & CEPH_CAP_XATTR_SHARED) ||
@ -1010,8 +1021,9 @@ handle_non_vxattrs:
/* security module gets xattr while filling trace */
if (current->journal_info) {
pr_warn_ratelimited("sync getxattr %p "
"during filling trace\n", inode);
pr_warn_ratelimited_client(cl,
"sync %p %llx.%llx during filling trace\n",
inode, ceph_vinop(inode));
return -EBUSY;
}
@ -1053,14 +1065,16 @@ out:
ssize_t ceph_listxattr(struct dentry *dentry, char *names, size_t size)
{
struct inode *inode = d_inode(dentry);
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
bool len_only = (size == 0);
u32 namelen;
int err;
spin_lock(&ci->i_ceph_lock);
dout("listxattr %p ver=%lld index_ver=%lld\n", inode,
ci->i_xattrs.version, ci->i_xattrs.index_version);
doutc(cl, "%p %llx.%llx ver=%lld index_ver=%lld\n", inode,
ceph_vinop(inode), ci->i_xattrs.version,
ci->i_xattrs.index_version);
if (ci->i_xattrs.version == 0 ||
!__ceph_caps_issued_mask_metric(ci, CEPH_CAP_XATTR_SHARED, 1)) {
@ -1094,7 +1108,8 @@ out:
static int ceph_sync_setxattr(struct inode *inode, const char *name,
const char *value, size_t size, int flags)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
struct ceph_fs_client *fsc = ceph_sb_to_fs_client(inode->i_sb);
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req;
struct ceph_mds_client *mdsc = fsc->mdsc;
@ -1119,7 +1134,7 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name,
flags |= CEPH_XATTR_REMOVE;
}
dout("setxattr value size: %zu\n", size);
doutc(cl, "name %s value size %zu\n", name, size);
/* do request */
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
@ -1148,10 +1163,10 @@ static int ceph_sync_setxattr(struct inode *inode, const char *name,
req->r_num_caps = 1;
req->r_inode_drop = CEPH_CAP_XATTR_SHARED;
dout("xattr.ver (before): %lld\n", ci->i_xattrs.version);
doutc(cl, "xattr.ver (before): %lld\n", ci->i_xattrs.version);
err = ceph_mdsc_do_request(mdsc, NULL, req);
ceph_mdsc_put_request(req);
dout("xattr.ver (after): %lld\n", ci->i_xattrs.version);
doutc(cl, "xattr.ver (after): %lld\n", ci->i_xattrs.version);
out:
if (pagelist)
@ -1162,9 +1177,10 @@ out:
int __ceph_setxattr(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
struct ceph_client *cl = ceph_inode_to_client(inode);
struct ceph_vxattr *vxattr;
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
struct ceph_mds_client *mdsc = ceph_sb_to_fs_client(inode->i_sb)->mdsc;
struct ceph_cap_flush *prealloc_cf = NULL;
struct ceph_buffer *old_blob = NULL;
int issued;
@ -1220,9 +1236,9 @@ retry:
required_blob_size = __get_required_blob_size(ci, name_len, val_len);
if ((ci->i_xattrs.version == 0) || !(issued & CEPH_CAP_XATTR_EXCL) ||
(required_blob_size > mdsc->mdsmap->m_max_xattr_size)) {
dout("%s do sync setxattr: version: %llu size: %d max: %llu\n",
__func__, ci->i_xattrs.version, required_blob_size,
mdsc->mdsmap->m_max_xattr_size);
doutc(cl, "sync version: %llu size: %d max: %llu\n",
ci->i_xattrs.version, required_blob_size,
mdsc->mdsmap->m_max_xattr_size);
goto do_sync;
}
@ -1236,8 +1252,8 @@ retry:
}
}
dout("setxattr %p name '%s' issued %s\n", inode, name,
ceph_cap_string(issued));
doutc(cl, "%p %llx.%llx name '%s' issued %s\n", inode,
ceph_vinop(inode), name, ceph_cap_string(issued));
__build_xattrs(inode);
if (!ci->i_xattrs.prealloc_blob ||
@ -1246,7 +1262,8 @@ retry:
spin_unlock(&ci->i_ceph_lock);
ceph_buffer_put(old_blob); /* Shouldn't be required */
dout(" pre-allocating new blob size=%d\n", required_blob_size);
doutc(cl, " pre-allocating new blob size=%d\n",
required_blob_size);
blob = ceph_buffer_new(required_blob_size, GFP_NOFS);
if (!blob)
goto do_sync_unlocked;
@ -1285,8 +1302,9 @@ do_sync_unlocked:
/* security module set xattr while filling trace */
if (current->journal_info) {
pr_warn_ratelimited("sync setxattr %p "
"during filling trace\n", inode);
pr_warn_ratelimited_client(cl,
"sync %p %llx.%llx during filling trace\n",
inode, ceph_vinop(inode));
err = -EBUSY;
} else {
err = ceph_sync_setxattr(inode, name, value, size, flags);

View File

@ -256,6 +256,7 @@ struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap)
return idmap;
}
EXPORT_SYMBOL_GPL(mnt_idmap_get);
/**
* mnt_idmap_put - put a reference to an idmapping
@ -271,3 +272,4 @@ void mnt_idmap_put(struct mnt_idmap *idmap)
kfree(idmap);
}
}
EXPORT_SYMBOL_GPL(mnt_idmap_put);
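
An illustrative sketch (not from the patch) of the pattern these exports enable: a module takes a reference on the caller's idmapping, stashes it across a deferred request, and drops it on teardown, which is how CephFS uses r_mnt_idmap in its MDS requests. The struct and function names below are hypothetical.

#include <linux/mnt_idmapping.h>

/* Hypothetical request that pins the caller's idmapping for its lifetime. */
struct example_request {
	struct mnt_idmap *r_mnt_idmap;
};

static void example_request_start(struct example_request *req,
				  struct mnt_idmap *idmap)
{
	req->r_mnt_idmap = mnt_idmap_get(idmap);	/* take a reference */
}

static void example_request_finish(struct example_request *req)
{
	mnt_idmap_put(req->r_mnt_idmap);		/* drop it when done */
}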

View File

@ -19,12 +19,25 @@
pr_debug("%.*s %12.12s:%-4d : " fmt, \
8 - (int)sizeof(KBUILD_MODNAME), " ", \
kbasename(__FILE__), __LINE__, ##__VA_ARGS__)
# define doutc(client, fmt, ...) \
pr_debug("%.*s %12.12s:%-4d : [%pU %llu] " fmt, \
8 - (int)sizeof(KBUILD_MODNAME), " ", \
kbasename(__FILE__), __LINE__, \
&client->fsid, client->monc.auth->global_id, \
##__VA_ARGS__)
# else
/* faux printk call just to see any compiler warnings. */
# define dout(fmt, ...) do { \
if (0) \
printk(KERN_DEBUG fmt, ##__VA_ARGS__); \
} while (0)
# define doutc(client, fmt, ...) do { \
if (0) \
printk(KERN_DEBUG "[%pU %llu] " fmt, \
&client->fsid, \
client->monc.auth->global_id, \
##__VA_ARGS__); \
} while (0)
# endif
#else
@ -33,7 +46,32 @@
* or, just wrap pr_debug
*/
# define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__)
# define doutc(client, fmt, ...) \
pr_debug(" [%pU %llu] %s: " fmt, &client->fsid, \
client->monc.auth->global_id, __func__, ##__VA_ARGS__)
#endif
#define pr_notice_client(client, fmt, ...) \
pr_notice("[%pU %llu]: " fmt, &client->fsid, \
client->monc.auth->global_id, ##__VA_ARGS__)
#define pr_info_client(client, fmt, ...) \
pr_info("[%pU %llu]: " fmt, &client->fsid, \
client->monc.auth->global_id, ##__VA_ARGS__)
#define pr_warn_client(client, fmt, ...) \
pr_warn("[%pU %llu]: " fmt, &client->fsid, \
client->monc.auth->global_id, ##__VA_ARGS__)
#define pr_warn_once_client(client, fmt, ...) \
pr_warn_once("[%pU %llu]: " fmt, &client->fsid, \
client->monc.auth->global_id, ##__VA_ARGS__)
#define pr_err_client(client, fmt, ...) \
pr_err("[%pU %llu]: " fmt, &client->fsid, \
client->monc.auth->global_id, ##__VA_ARGS__)
#define pr_warn_ratelimited_client(client, fmt, ...) \
pr_warn_ratelimited("[%pU %llu]: " fmt, &client->fsid, \
client->monc.auth->global_id, ##__VA_ARGS__)
#define pr_err_ratelimited_client(client, fmt, ...) \
pr_err_ratelimited("[%pU %llu]: " fmt, &client->fsid, \
client->monc.auth->global_id, ##__VA_ARGS__)
#endif
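
An illustrative sketch (not from the patch) of a call site using the new client-aware macros; every message gets the "[fsid global_id]" prefix defined above, so mounts from different clusters and clients can be told apart in the logs. The function name is hypothetical.

#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/libceph.h>

/* Hypothetical call site: both macros tag the message with [fsid global_id]. */
static void example_log_session(struct ceph_client *client, int mds)
{
	doutc(client, "opened session to mds%d\n", mds);
	pr_warn_client(client, "mds%d is laggy\n", mds);
}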

View File

@ -357,6 +357,11 @@ enum {
CEPH_MDS_OP_RENAMESNAP = 0x01403,
};
#define IS_CEPH_MDS_OP_NEWINODE(op) (op == CEPH_MDS_OP_CREATE || \
op == CEPH_MDS_OP_MKNOD || \
op == CEPH_MDS_OP_MKDIR || \
op == CEPH_MDS_OP_SYMLINK)
extern const char *ceph_mds_op_name(int op);
#define CEPH_SETATTR_MODE (1 << 0)
@ -497,7 +502,7 @@ struct ceph_mds_request_head_legacy {
union ceph_mds_request_args args;
} __attribute__ ((packed));
#define CEPH_MDS_REQUEST_HEAD_VERSION 2
#define CEPH_MDS_REQUEST_HEAD_VERSION 3
struct ceph_mds_request_head_old {
__le16 version; /* struct version */
@ -528,6 +533,9 @@ struct ceph_mds_request_head {
__le32 ext_num_retry; /* new count retry attempts */
__le32 ext_num_fwd; /* new count fwd attempts */
__le32 struct_len; /* to store size of struct ceph_mds_request_head */
__le32 owner_uid, owner_gid; /* used for OPs which create inodes */
} __attribute__ ((packed));
/* cap/lease release record */

View File

@ -115,6 +115,9 @@ static inline bool vfsgid_eq_kgid(vfsgid_t vfsgid, kgid_t kgid)
int vfsgid_in_group_p(vfsgid_t vfsgid);
struct mnt_idmap *mnt_idmap_get(struct mnt_idmap *idmap);
void mnt_idmap_put(struct mnt_idmap *idmap);
vfsuid_t make_vfsuid(struct mnt_idmap *idmap,
struct user_namespace *fs_userns, kuid_t kuid);