ceph: use empty snap context for uninline_data and get_pool_perm

uninline_data() and get_pool_perm() access the cached_context in
ceph_snap_realm directly, which is racy in theory. Neither function
modifies an existing object; they only create new objects, so we can
pass the empty snap context to them instead. Unlike the cached_context
in ceph_snap_realm, the empty snap context needs no protection.

Signed-off-by: Yan, Zheng <zyan@redhat.com>
commit 7b06a826e7 (parent b01da6a08c)
Author: Yan, Zheng <zyan@redhat.com>
Date:   2015-05-01 10:03:40 +08:00
Committer: Ilya Dryomov

3 files changed, 14 insertions(+), 14 deletions(-)
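
For readers less familiar with the snap-context lifecycle, here is a rough,
hedged sketch of the pattern the patch relies on. It is illustrative only and
not the kernel's exact call sites: the example_* names are hypothetical, and it
assumes only helpers that already appear in the diff below
(ceph_create_snap_context(), ceph_put_snap_context(), and the snapc argument of
ceph_osdc_alloc_request()). The key point is that a snap context created with
zero snapshots is never modified after initialization and is reference-counted,
so create-only requests can share it without taking any snap-realm lock.

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>

/* Hypothetical stand-in for the global ceph_empty_snapc added below. */
static struct ceph_snap_context *example_empty_snapc;

static int example_snapc_init(void)
{
	/* Zero snaps: writes tagged with this context carry no snapshot history. */
	example_empty_snapc = ceph_create_snap_context(0, GFP_NOFS);
	if (!example_empty_snapc)
		return -ENOMEM;
	example_empty_snapc->seq = 1;
	return 0;
}

static struct ceph_osd_request *
example_alloc_create_only_req(struct ceph_osd_client *osdc)
{
	/*
	 * The request only creates a new object, so the immutable empty
	 * context is sufficient; no access to the realm's cached_context
	 * (and hence no locking of it) is needed.
	 */
	return ceph_osdc_alloc_request(osdc, example_empty_snapc,
				       1, false, GFP_NOFS);
}

static void example_snapc_exit(void)
{
	/* Drop the reference taken by ceph_create_snap_context(). */
	ceph_put_snap_context(example_empty_snapc);
}

In the patch itself the shared context is the global ceph_empty_snapc, created
in ceph_snap_init() and declared extern in super.h (last hunk below).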

--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c

@@ -1510,8 +1510,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
 				    ceph_vino(inode), 0, &len, 0, 1,
 				    CEPH_OSD_OP_CREATE,
 				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
-				    ci->i_snap_realm->cached_context,
-				    0, 0, false);
+				    ceph_empty_snapc, 0, 0, false);
 	if (IS_ERR(req)) {
 		err = PTR_ERR(req);
 		goto out;
@@ -1529,7 +1528,7 @@ int ceph_uninline_data(struct file *filp, struct page *locked_page)
 				    ceph_vino(inode), 0, &len, 1, 3,
 				    CEPH_OSD_OP_WRITE,
 				    CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
-				    ci->i_snap_realm->cached_context,
+				    ceph_empty_snapc,
 				    ci->i_truncate_seq, ci->i_truncate_size,
 				    false);
 	if (IS_ERR(req)) {
@@ -1653,7 +1652,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
 	}
 	rd_req = ceph_osdc_alloc_request(&fsc->client->osdc,
-					 ci->i_snap_realm->cached_context,
+					 ceph_empty_snapc,
 					 1, false, GFP_NOFS);
 	if (!rd_req) {
 		err = -ENOMEM;
@@ -1668,7 +1667,7 @@ static int __ceph_pool_perm_get(struct ceph_inode_info *ci, u32 pool)
 	rd_req->r_base_oid.name_len = strlen(rd_req->r_base_oid.name);
 	wr_req = ceph_osdc_alloc_request(&fsc->client->osdc,
-					 ci->i_snap_realm->cached_context,
+					 ceph_empty_snapc,
 					 1, false, GFP_NOFS);
 	if (!wr_req) {
 		err = -ENOMEM;

--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c

@@ -296,7 +296,7 @@ static int cmpu64_rev(const void *a, const void *b)
 }
-static struct ceph_snap_context *empty_snapc;
+struct ceph_snap_context *ceph_empty_snapc;
 /*
  * build the snap context for a given realm.
@@ -338,9 +338,9 @@ static int build_snap_context(struct ceph_snap_realm *realm)
 		return 0;
 	}
-	if (num == 0 && realm->seq == empty_snapc->seq) {
-		ceph_get_snap_context(empty_snapc);
-		snapc = empty_snapc;
+	if (num == 0 && realm->seq == ceph_empty_snapc->seq) {
+		ceph_get_snap_context(ceph_empty_snapc);
+		snapc = ceph_empty_snapc;
 		goto done;
 	}
@@ -482,7 +482,7 @@ void ceph_queue_cap_snap(struct ceph_inode_info *ci)
 		   cap_snap. lucky us. */
 		dout("queue_cap_snap %p already pending\n", inode);
 		kfree(capsnap);
-	} else if (ci->i_snap_realm->cached_context == empty_snapc) {
+	} else if (ci->i_snap_realm->cached_context == ceph_empty_snapc) {
 		dout("queue_cap_snap %p empty snapc\n", inode);
 		kfree(capsnap);
 	} else if (dirty & (CEPH_CAP_AUTH_EXCL|CEPH_CAP_XATTR_EXCL|
@@ -964,14 +964,14 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
 int __init ceph_snap_init(void)
 {
-	empty_snapc = ceph_create_snap_context(0, GFP_NOFS);
-	if (!empty_snapc)
+	ceph_empty_snapc = ceph_create_snap_context(0, GFP_NOFS);
+	if (!ceph_empty_snapc)
 		return -ENOMEM;
-	empty_snapc->seq = 1;
+	ceph_empty_snapc->seq = 1;
 	return 0;
 }
 void ceph_snap_exit(void)
 {
-	ceph_put_snap_context(empty_snapc);
+	ceph_put_snap_context(ceph_empty_snapc);
 }

--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h

@@ -692,6 +692,7 @@ static inline int default_congestion_kb(void)
 /* snap.c */
+extern struct ceph_snap_context *ceph_empty_snapc;
 struct ceph_snap_realm *ceph_lookup_snap_realm(struct ceph_mds_client *mdsc,
 					       u64 ino);
 extern void ceph_get_snap_realm(struct ceph_mds_client *mdsc,