dlm for 5.19

This set includes several large patches to improve endian handling and
 remove sparse warnings.  The code previously used in/out functions that
 performed in-place endianness conversion.  Other code cleanup includes
 the list iterator changes.  Finally, a long-standing bug was found and
 fixed, caused by a missed decrement of a lock struct ref count.
 -----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJijT7zAAoJEDgbc8f8gGmqQxUQAIJiDJEKmdiXWn4s8arqHHp3
 d5Teh8SCIwF/pVvpFKzibbAfqD5OzulbQneAJSy+tz8r6uW8QaOSADcQa3Lit1yw
 7wMQGn38VMsGw20y087sOdqOxTdVir/OwzjMCNoUtGOddhQ4XmNY06++O8tAiMFt
 EroGRfvVC2Y7UI78ntvK49Z9errAxhEKpJMAG5yiA4lt12vSaORoJOTMWfIcVdWa
 htjkPSf0W7W5kbvFGW+/wSSt9QGuGTDmjPY+PVqOmIHgVKQkcfrCCbkCjeXOxNav
 1XhoENIxL4FPbkRLnLxrJTpIbbz42H8F8AX82ra+AH8gEy5xBOd2ltA4ozTjBBNE
 /EmsSrfSPcHQOlFGXHv9gF2fRcOPAg6q+edNruIdk2hNb4LoTBtZJcwoqVbaNSdq
 b9V8U8vzSJDVxOi/wdyf9/8oRmYd6FcNtgEiDAA59IfqCHN/bQsVzoONKPB1C0PZ
 fCy4Pm2bbwffqCLC0d0QO0cs2Ow7mRgIWnPafzhXQnIJzNx6ZBx8bvmQNWUMg3Mg
 Ys+tbuN6I53dbYtbAgmLwOP1/4tyMi7vTAli007EZx1XhjXWvnOLxHN1GYFrcbLB
 UUWdru7PxIEEpb+HQDMd0I8lY9qG22Sem9UkzP6N9478ysIAbnZp5s05lucJ7ZeN
 l/XDofB5+CpL+8B7QxRR
 =aFyJ
 -----END PGP SIGNATURE-----

Merge tag 'dlm-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm

Pull dlm updates from David Teigland:
 "This includes several large patches to improve endian handling and
  remove sparse warnings. The code previously used in/out, in-place
  endianness conversion functions.

  Other code cleanup includes the list iterator changes.

  Finally, a long standing bug was found and fixed, caused by missed
  decrement on an lock struct ref count"
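
A minimal sketch of the endianness pattern the series converges on, using
hypothetical names (struct wire_hdr, fill_hdr, check_hdr) rather than the
dlm structures themselves: on-wire fields are declared with __le types,
values are converted with cpu_to_le*/le*_to_cpu at the point of use, and
constant switch cases are written in wire byte order so sparse can check
endianness at compile time (compare the struct dlm_header and rc_type
hunks below).

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* hypothetical wire format, for illustration only */
    struct wire_hdr {
            __le32 version;
            __le16 length;
    };

    static void fill_hdr(struct wire_hdr *hdr, u32 version, u16 length)
    {
            /* convert once, where the field is written */
            hdr->version = cpu_to_le32(version);
            hdr->length = cpu_to_le16(length);
    }

    static int check_hdr(const struct wire_hdr *hdr)
    {
            /* compare against a compile-time constant in wire byte order */
            switch (hdr->version) {
            case cpu_to_le32(1):
                    return le16_to_cpu(hdr->length);
            default:
                    return -EPROTO;
            }
    }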

* tag 'dlm-5.19' of git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm: (28 commits)
  dlm: use kref_put_lock in __put_lkb
  dlm: use kref_put_lock in put_rsb
  dlm: remove unnecessary error assign
  dlm: fix missing lkb refcount handling
  fs: dlm: cast resource pointer to uintptr_t
  dlm: replace usage of found with dedicated list iterator variable
  dlm: remove usage of list iterator for list_add() after the loop body
  dlm: fix pending remove if msg allocation fails
  dlm: fix wake_up() calls for pending remove
  dlm: check required context while close
  dlm: cleanup lock handling in dlm_master_lookup
  dlm: remove found label in dlm_master_lookup
  dlm: remove __user conversion warnings
  dlm: move conversion to compile time
  dlm: use __le types for dlm messages
  dlm: use __le types for rcom messages
  dlm: use __le types for dlm header
  dlm: use __le types for options header
  dlm: add __CHECKER__ for false positives
  dlm: move global to static inits
  ...
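
The first two commits in the list switch ref count release to
kref_put_lock(), which takes the protecting spinlock only when the count
actually drops to zero and leaves it held on that path. A hedged sketch of
the pattern with made-up names (struct my_obj, my_release, my_obj_put,
obj_lock), not the dlm code itself:

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(obj_lock);

    struct my_obj {
            struct kref ref;
            /* ... payload ... */
    };

    static void my_release(struct kref *kref)
    {
            struct my_obj *obj = container_of(kref, struct my_obj, ref);

            /* called with obj_lock held */
            kfree(obj);
    }

    static void my_obj_put(struct my_obj *obj)
    {
            /* the fast path never touches obj_lock; on the final put the
             * lock is acquired, my_release() runs, and we unlock here */
            if (kref_put_lock(&obj->ref, my_release, &obj_lock))
                    spin_unlock(&obj_lock);
    }
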
Linus Torvalds 2022-05-24 19:09:16 -07:00
commit f289811258
15 changed files with 633 additions and 669 deletions
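
The list iterator cleanups below (see the dev_write() and recover_lvb()
hunks) replace a 'found' flag or post-loop use of the iterator with a
dedicated iterator variable, assigning a separate result pointer only on a
match. A minimal sketch with hypothetical names (struct item, item_list,
find_item):

    #include <linux/list.h>

    struct item {
            struct list_head list;
            int id;
    };

    static LIST_HEAD(item_list);

    static struct item *find_item(int id)
    {
            struct item *found = NULL, *iter;

            list_for_each_entry(iter, &item_list, list) {
                    if (iter->id == id) {
                            /* record the match; never touch 'iter' after the loop */
                            found = iter;
                            break;
                    }
            }

            return found;
    }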

View File

@ -101,7 +101,7 @@ int dlm_recover_directory(struct dlm_ls *ls)
*/
b = ls->ls_recover_buf->rc_buf;
left = ls->ls_recover_buf->rc_header.h_length;
left = le16_to_cpu(ls->ls_recover_buf->rc_header.h_length);
left -= sizeof(struct dlm_rcom);
for (;;) {

View File

@ -379,15 +379,15 @@ static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag)
#define DLM_FIN 5
struct dlm_header {
uint32_t h_version;
__le32 h_version;
union {
/* for DLM_MSG and DLM_RCOM */
uint32_t h_lockspace;
__le32 h_lockspace;
/* for DLM_ACK and DLM_OPTS */
uint32_t h_seq;
__le32 h_seq;
} u;
uint32_t h_nodeid; /* nodeid of sender */
uint16_t h_length;
__le32 h_nodeid; /* nodeid of sender */
__le16 h_length;
uint8_t h_cmd; /* DLM_MSG, DLM_RCOM */
uint8_t h_pad;
};
@ -409,24 +409,24 @@ struct dlm_header {
struct dlm_message {
struct dlm_header m_header;
uint32_t m_type; /* DLM_MSG_ */
uint32_t m_nodeid;
uint32_t m_pid;
uint32_t m_lkid; /* lkid on sender */
uint32_t m_remid; /* lkid on receiver */
uint32_t m_parent_lkid;
uint32_t m_parent_remid;
uint32_t m_exflags;
uint32_t m_sbflags;
uint32_t m_flags;
uint32_t m_lvbseq;
uint32_t m_hash;
int m_status;
int m_grmode;
int m_rqmode;
int m_bastmode;
int m_asts;
int m_result; /* 0 or -EXXX */
__le32 m_type; /* DLM_MSG_ */
__le32 m_nodeid;
__le32 m_pid;
__le32 m_lkid; /* lkid on sender */
__le32 m_remid; /* lkid on receiver */
__le32 m_parent_lkid;
__le32 m_parent_remid;
__le32 m_exflags;
__le32 m_sbflags;
__le32 m_flags;
__le32 m_lvbseq;
__le32 m_hash;
__le32 m_status;
__le32 m_grmode;
__le32 m_rqmode;
__le32 m_bastmode;
__le32 m_asts;
__le32 m_result; /* 0 or -EXXX */
char m_extra[]; /* name or lvb */
};
@ -451,18 +451,18 @@ struct dlm_message {
struct dlm_rcom {
struct dlm_header rc_header;
uint32_t rc_type; /* DLM_RCOM_ */
int rc_result; /* multi-purpose */
uint64_t rc_id; /* match reply with request */
uint64_t rc_seq; /* sender's ls_recover_seq */
uint64_t rc_seq_reply; /* remote ls_recover_seq */
__le32 rc_type; /* DLM_RCOM_ */
__le32 rc_result; /* multi-purpose */
__le64 rc_id; /* match reply with request */
__le64 rc_seq; /* sender's ls_recover_seq */
__le64 rc_seq_reply; /* remote ls_recover_seq */
char rc_buf[];
};
struct dlm_opt_header {
uint16_t t_type;
uint16_t t_length;
uint32_t t_pad;
__le16 t_type;
__le16 t_length;
__le32 t_pad;
/* need to be 8 byte aligned */
char t_value[];
};
@ -472,8 +472,8 @@ struct dlm_opts {
struct dlm_header o_header;
uint8_t o_nextcmd;
uint8_t o_pad;
uint16_t o_optlen;
uint32_t o_pad2;
__le16 o_optlen;
__le32 o_pad2;
char o_opts[];
};

File diff suppressed because it is too large

View File

@ -922,3 +922,15 @@ void dlm_stop_lockspaces(void)
log_print("dlm user daemon left %d lockspaces", count);
}
void dlm_stop_lockspaces_check(void)
{
struct dlm_ls *ls;
spin_lock(&lslist_lock);
list_for_each_entry(ls, &lslist, ls_list) {
if (WARN_ON(!rwsem_is_locked(&ls->ls_in_recovery) ||
!dlm_locking_stopped(ls)))
break;
}
spin_unlock(&lslist_lock);
}

View File

@ -19,6 +19,7 @@ struct dlm_ls *dlm_find_lockspace_local(void *id);
struct dlm_ls *dlm_find_lockspace_device(int minor);
void dlm_put_lockspace(struct dlm_ls *ls);
void dlm_stop_lockspaces(void);
void dlm_stop_lockspaces_check(void);
#endif /* __LOCKSPACE_DOT_H__ */

View File

@ -1303,6 +1303,10 @@ static struct dlm_msg *dlm_lowcomms_new_msg_con(struct connection *con, int len,
return msg;
}
/* avoid false positive for nodes_srcu, unlock happens in
* dlm_lowcomms_commit_msg which is a must call if success
*/
#ifndef __CHECKER__
struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
char **ppc, void (*cb)(void *data),
void *data)
@ -1336,6 +1340,7 @@ struct dlm_msg *dlm_lowcomms_new_msg(int nodeid, int len, gfp_t allocation,
msg->idx = idx;
return msg;
}
#endif
static void _dlm_lowcomms_commit_msg(struct dlm_msg *msg)
{
@ -1362,11 +1367,16 @@ out:
return;
}
/* avoid false positive for nodes_srcu, lock was happen in
* dlm_lowcomms_new_msg
*/
#ifndef __CHECKER__
void dlm_lowcomms_commit_msg(struct dlm_msg *msg)
{
_dlm_lowcomms_commit_msg(msg);
srcu_read_unlock(&connections_srcu, msg->idx);
}
#endif
void dlm_lowcomms_put_msg(struct dlm_msg *msg)
{
@ -1789,7 +1799,7 @@ static int dlm_listen_for_all(void)
SOCK_STREAM, dlm_proto_ops->proto, &sock);
if (result < 0) {
log_print("Can't create comms socket: %d", result);
goto out;
return result;
}
sock_set_mark(sock->sk, dlm_config.ci_mark);

View File

@ -20,7 +20,7 @@
int dlm_slots_version(struct dlm_header *h)
{
if ((h->h_version & 0x0000FFFF) < DLM_HEADER_SLOTS)
if ((le32_to_cpu(h->h_version) & 0x0000FFFF) < DLM_HEADER_SLOTS)
return 0;
return 1;
}
@ -120,18 +120,13 @@ int dlm_slots_copy_in(struct dlm_ls *ls)
ro0 = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));
for (i = 0, ro = ro0; i < num_slots; i++, ro++) {
ro->ro_nodeid = le32_to_cpu(ro->ro_nodeid);
ro->ro_slot = le16_to_cpu(ro->ro_slot);
}
log_slots(ls, gen, num_slots, ro0, NULL, 0);
list_for_each_entry(memb, &ls->ls_nodes, list) {
for (i = 0, ro = ro0; i < num_slots; i++, ro++) {
if (ro->ro_nodeid != memb->nodeid)
if (le32_to_cpu(ro->ro_nodeid) != memb->nodeid)
continue;
memb->slot = ro->ro_slot;
memb->slot = le16_to_cpu(ro->ro_slot);
memb->slot_prev = memb->slot;
break;
}

View File

@ -135,6 +135,7 @@
#include <net/tcp.h>
#include "dlm_internal.h"
#include "lockspace.h"
#include "lowcomms.h"
#include "config.h"
#include "memory.h"
@ -380,13 +381,12 @@ static int dlm_send_ack(int nodeid, uint32_t seq)
m_header = (struct dlm_header *)ppc;
m_header->h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
m_header->h_nodeid = dlm_our_nodeid();
m_header->h_length = mb_len;
m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid());
m_header->h_length = cpu_to_le16(mb_len);
m_header->h_cmd = DLM_ACK;
m_header->u.h_seq = seq;
m_header->u.h_seq = cpu_to_le32(seq);
header_out(m_header);
dlm_lowcomms_commit_msg(msg);
dlm_lowcomms_put_msg(msg);
@ -409,13 +409,11 @@ static int dlm_send_fin(struct midcomms_node *node,
m_header = (struct dlm_header *)ppc;
m_header->h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
m_header->h_nodeid = dlm_our_nodeid();
m_header->h_length = mb_len;
m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid());
m_header->h_length = cpu_to_le16(mb_len);
m_header->h_cmd = DLM_FIN;
header_out(m_header);
pr_debug("sending fin msg to node %d\n", node->nodeid);
dlm_midcomms_commit_mhandle(mh);
set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
@ -574,14 +572,14 @@ dlm_midcomms_recv_node_lookup(int nodeid, const union dlm_packet *p,
return NULL;
}
switch (le32_to_cpu(p->rcom.rc_type)) {
case DLM_RCOM_NAMES:
switch (p->rcom.rc_type) {
case cpu_to_le32(DLM_RCOM_NAMES):
fallthrough;
case DLM_RCOM_NAMES_REPLY:
case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
fallthrough;
case DLM_RCOM_STATUS:
case cpu_to_le32(DLM_RCOM_STATUS):
fallthrough;
case DLM_RCOM_STATUS_REPLY:
case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
node = nodeid2node(nodeid, 0);
if (node) {
spin_lock(&node->state_lock);
@ -741,14 +739,14 @@ static void dlm_midcomms_receive_buffer_3_2(union dlm_packet *p, int nodeid)
*
* length already checked.
*/
switch (le32_to_cpu(p->rcom.rc_type)) {
case DLM_RCOM_NAMES:
switch (p->rcom.rc_type) {
case cpu_to_le32(DLM_RCOM_NAMES):
fallthrough;
case DLM_RCOM_NAMES_REPLY:
case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
fallthrough;
case DLM_RCOM_STATUS:
case cpu_to_le32(DLM_RCOM_STATUS):
fallthrough;
case DLM_RCOM_STATUS_REPLY:
case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
break;
default:
log_print("unsupported rcom type received: %u, will skip this message from node %d",
@ -1020,11 +1018,10 @@ static void dlm_fill_opts_header(struct dlm_opts *opts, uint16_t inner_len,
uint32_t seq)
{
opts->o_header.h_cmd = DLM_OPTS;
opts->o_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
opts->o_header.h_nodeid = dlm_our_nodeid();
opts->o_header.h_length = DLM_MIDCOMMS_OPT_LEN + inner_len;
opts->o_header.u.h_seq = seq;
header_out(&opts->o_header);
opts->o_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
opts->o_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
opts->o_header.h_length = cpu_to_le16(DLM_MIDCOMMS_OPT_LEN + inner_len);
opts->o_header.u.h_seq = cpu_to_le32(seq);
}
static void midcomms_new_msg_cb(void *data)
@ -1062,6 +1059,10 @@ static struct dlm_msg *dlm_midcomms_get_msg_3_2(struct dlm_mhandle *mh, int node
return msg;
}
/* avoid false positive for nodes_srcu, unlock happens in
* dlm_midcomms_commit_mhandle which is a must call if success
*/
#ifndef __CHECKER__
struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len,
gfp_t allocation, char **ppc)
{
@ -1127,6 +1128,7 @@ err:
srcu_read_unlock(&nodes_srcu, idx);
return NULL;
}
#endif
static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh)
{
@ -1136,6 +1138,10 @@ static void dlm_midcomms_commit_msg_3_2(struct dlm_mhandle *mh)
dlm_lowcomms_commit_msg(mh->msg);
}
/* avoid false positive for nodes_srcu, lock was happen in
* dlm_midcomms_get_mhandle
*/
#ifndef __CHECKER__
void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh)
{
switch (mh->node->version) {
@ -1157,6 +1163,7 @@ void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh)
break;
}
}
#endif
int dlm_midcomms_start(void)
{
@ -1406,6 +1413,8 @@ int dlm_midcomms_close(int nodeid)
if (nodeid == dlm_our_nodeid())
return 0;
dlm_stop_lockspaces_check();
idx = srcu_read_lock(&nodes_srcu);
/* Abort pending close/remove operation */
node = nodeid2node(nodeid, 0);
@ -1455,7 +1464,7 @@ static void midcomms_new_rawmsg_cb(void *data)
switch (h->h_cmd) {
case DLM_OPTS:
if (!h->u.h_seq)
h->u.h_seq = rd->node->seq_send++;
h->u.h_seq = cpu_to_le32(rd->node->seq_send++);
break;
default:
break;

View File

@ -13,27 +13,27 @@
#include "dlm_internal.h"
#include "lockspace.h"
static spinlock_t ops_lock;
static struct list_head send_list;
static struct list_head recv_list;
static wait_queue_head_t send_wq;
static wait_queue_head_t recv_wq;
static DEFINE_SPINLOCK(ops_lock);
static LIST_HEAD(send_list);
static LIST_HEAD(recv_list);
static DECLARE_WAIT_QUEUE_HEAD(send_wq);
static DECLARE_WAIT_QUEUE_HEAD(recv_wq);
struct plock_async_data {
void *fl;
void *file;
struct file_lock flc;
int (*callback)(struct file_lock *fl, int result);
};
struct plock_op {
struct list_head list;
int done;
struct dlm_plock_info info;
/* if set indicates async handling */
struct plock_async_data *data;
};
struct plock_xop {
struct plock_op xop;
int (*callback)(struct file_lock *fl, int result);
void *fl;
void *file;
struct file_lock flc;
};
static inline void set_version(struct dlm_plock_info *info)
{
info->version[0] = DLM_PLOCK_VERSION_MAJOR;
@ -58,10 +58,15 @@ static int check_version(struct dlm_plock_info *info)
return 0;
}
static void dlm_release_plock_op(struct plock_op *op)
{
kfree(op->data);
kfree(op);
}
static void send_op(struct plock_op *op)
{
set_version(&op->info);
INIT_LIST_HEAD(&op->list);
spin_lock(&ops_lock);
list_add_tail(&op->list, &send_list);
spin_unlock(&ops_lock);
@ -101,22 +106,21 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number,
int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
int cmd, struct file_lock *fl)
{
struct plock_async_data *op_data;
struct dlm_ls *ls;
struct plock_op *op;
struct plock_xop *xop;
int rv;
ls = dlm_find_lockspace_local(lockspace);
if (!ls)
return -EINVAL;
xop = kzalloc(sizeof(*xop), GFP_NOFS);
if (!xop) {
op = kzalloc(sizeof(*op), GFP_NOFS);
if (!op) {
rv = -ENOMEM;
goto out;
}
op = &xop->xop;
op->info.optype = DLM_PLOCK_OP_LOCK;
op->info.pid = fl->fl_pid;
op->info.ex = (fl->fl_type == F_WRLCK);
@ -125,46 +129,49 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
op->info.number = number;
op->info.start = fl->fl_start;
op->info.end = fl->fl_end;
/* async handling */
if (fl->fl_lmops && fl->fl_lmops->lm_grant) {
op_data = kzalloc(sizeof(*op_data), GFP_NOFS);
if (!op_data) {
dlm_release_plock_op(op);
rv = -ENOMEM;
goto out;
}
/* fl_owner is lockd which doesn't distinguish
processes on the nfs client */
op->info.owner = (__u64) fl->fl_pid;
xop->callback = fl->fl_lmops->lm_grant;
locks_init_lock(&xop->flc);
locks_copy_lock(&xop->flc, fl);
xop->fl = fl;
xop->file = file;
op_data->callback = fl->fl_lmops->lm_grant;
locks_init_lock(&op_data->flc);
locks_copy_lock(&op_data->flc, fl);
op_data->fl = fl;
op_data->file = file;
op->data = op_data;
send_op(op);
rv = FILE_LOCK_DEFERRED;
goto out;
} else {
op->info.owner = (__u64)(long) fl->fl_owner;
xop->callback = NULL;
}
send_op(op);
if (xop->callback == NULL) {
rv = wait_event_interruptible(recv_wq, (op->done != 0));
if (rv == -ERESTARTSYS) {
log_debug(ls, "dlm_posix_lock: wait killed %llx",
(unsigned long long)number);
spin_lock(&ops_lock);
list_del(&op->list);
spin_unlock(&ops_lock);
kfree(xop);
do_unlock_close(ls, number, file, fl);
goto out;
}
} else {
rv = FILE_LOCK_DEFERRED;
rv = wait_event_interruptible(recv_wq, (op->done != 0));
if (rv == -ERESTARTSYS) {
spin_lock(&ops_lock);
list_del(&op->list);
spin_unlock(&ops_lock);
log_print("%s: wait interrupted %x %llx, op removed",
__func__, ls->ls_global_id,
(unsigned long long)number);
dlm_release_plock_op(op);
do_unlock_close(ls, number, file, fl);
goto out;
}
spin_lock(&ops_lock);
if (!list_empty(&op->list)) {
log_error(ls, "dlm_posix_lock: op on list %llx",
(unsigned long long)number);
list_del(&op->list);
}
spin_unlock(&ops_lock);
WARN_ON(!list_empty(&op->list));
rv = op->info.rv;
@ -174,7 +181,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
(unsigned long long)number);
}
kfree(xop);
dlm_release_plock_op(op);
out:
dlm_put_lockspace(ls);
return rv;
@ -184,26 +191,20 @@ EXPORT_SYMBOL_GPL(dlm_posix_lock);
/* Returns failure iff a successful lock operation should be canceled */
static int dlm_plock_callback(struct plock_op *op)
{
struct plock_async_data *op_data = op->data;
struct file *file;
struct file_lock *fl;
struct file_lock *flc;
int (*notify)(struct file_lock *fl, int result) = NULL;
struct plock_xop *xop = (struct plock_xop *)op;
int rv = 0;
spin_lock(&ops_lock);
if (!list_empty(&op->list)) {
log_print("dlm_plock_callback: op on list %llx",
(unsigned long long)op->info.number);
list_del(&op->list);
}
spin_unlock(&ops_lock);
WARN_ON(!list_empty(&op->list));
/* check if the following 2 are still valid or make a copy */
file = xop->file;
flc = &xop->flc;
fl = xop->fl;
notify = xop->callback;
file = op_data->file;
flc = &op_data->flc;
fl = op_data->fl;
notify = op_data->callback;
if (op->info.rv) {
notify(fl, op->info.rv);
@ -234,7 +235,7 @@ static int dlm_plock_callback(struct plock_op *op)
}
out:
kfree(xop);
dlm_release_plock_op(op);
return rv;
}
@ -290,13 +291,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
send_op(op);
wait_event(recv_wq, (op->done != 0));
spin_lock(&ops_lock);
if (!list_empty(&op->list)) {
log_error(ls, "dlm_posix_unlock: op on list %llx",
(unsigned long long)number);
list_del(&op->list);
}
spin_unlock(&ops_lock);
WARN_ON(!list_empty(&op->list));
rv = op->info.rv;
@ -304,7 +299,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
rv = 0;
out_free:
kfree(op);
dlm_release_plock_op(op);
out:
dlm_put_lockspace(ls);
fl->fl_flags = fl_flags;
@ -344,13 +339,7 @@ int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
send_op(op);
wait_event(recv_wq, (op->done != 0));
spin_lock(&ops_lock);
if (!list_empty(&op->list)) {
log_error(ls, "dlm_posix_get: op on list %llx",
(unsigned long long)number);
list_del(&op->list);
}
spin_unlock(&ops_lock);
WARN_ON(!list_empty(&op->list));
/* info.rv from userspace is 1 for conflict, 0 for no-conflict,
-ENOENT if there are no locks on the file */
@ -370,7 +359,7 @@ int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
rv = 0;
}
kfree(op);
dlm_release_plock_op(op);
out:
dlm_put_lockspace(ls);
return rv;
@ -406,7 +395,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
(the process did not make an unlock call). */
if (op->info.flags & DLM_PLOCK_FL_CLOSE)
kfree(op);
dlm_release_plock_op(op);
if (copy_to_user(u, &info, sizeof(info)))
return -EFAULT;
@ -418,9 +407,9 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count,
static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
loff_t *ppos)
{
struct plock_op *op = NULL, *iter;
struct dlm_plock_info info;
struct plock_op *op;
int found = 0, do_callback = 0;
int do_callback = 0;
if (count != sizeof(info))
return -EINVAL;
@ -432,31 +421,30 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
return -EINVAL;
spin_lock(&ops_lock);
list_for_each_entry(op, &recv_list, list) {
if (op->info.fsid == info.fsid &&
op->info.number == info.number &&
op->info.owner == info.owner) {
struct plock_xop *xop = (struct plock_xop *)op;
list_del_init(&op->list);
memcpy(&op->info, &info, sizeof(info));
if (xop->callback)
list_for_each_entry(iter, &recv_list, list) {
if (iter->info.fsid == info.fsid &&
iter->info.number == info.number &&
iter->info.owner == info.owner) {
list_del_init(&iter->list);
memcpy(&iter->info, &info, sizeof(info));
if (iter->data)
do_callback = 1;
else
op->done = 1;
found = 1;
iter->done = 1;
op = iter;
break;
}
}
spin_unlock(&ops_lock);
if (found) {
if (op) {
if (do_callback)
dlm_plock_callback(op);
else
wake_up(&recv_wq);
} else
log_print("dev_write no op %x %llx", info.fsid,
(unsigned long long)info.number);
log_print("%s: no op %x %llx - may got interrupted?", __func__,
info.fsid, (unsigned long long)info.number);
return count;
}
@ -492,12 +480,6 @@ int dlm_plock_init(void)
{
int rv;
spin_lock_init(&ops_lock);
INIT_LIST_HEAD(&send_list);
INIT_LIST_HEAD(&recv_list);
init_waitqueue_head(&send_wq);
init_waitqueue_head(&recv_wq);
rv = misc_register(&plock_dev_misc);
if (rv)
log_print("dlm_plock_init: misc_register failed %d", rv);

View File

@ -34,16 +34,16 @@ static void _create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
rc = (struct dlm_rcom *) mb;
rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
rc->rc_header.u.h_lockspace = ls->ls_global_id;
rc->rc_header.h_nodeid = dlm_our_nodeid();
rc->rc_header.h_length = mb_len;
rc->rc_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
rc->rc_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id);
rc->rc_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
rc->rc_header.h_length = cpu_to_le16(mb_len);
rc->rc_header.h_cmd = DLM_RCOM;
rc->rc_type = type;
rc->rc_type = cpu_to_le32(type);
spin_lock(&ls->ls_recover_lock);
rc->rc_seq = ls->ls_recover_seq;
rc->rc_seq = cpu_to_le64(ls->ls_recover_seq);
spin_unlock(&ls->ls_recover_lock);
*rc_ret = rc;
@ -91,13 +91,11 @@ static int create_rcom_stateless(struct dlm_ls *ls, int to_nodeid, int type,
static void send_rcom(struct dlm_mhandle *mh, struct dlm_rcom *rc)
{
dlm_rcom_out(rc);
dlm_midcomms_commit_mhandle(mh);
}
static void send_rcom_stateless(struct dlm_msg *msg, struct dlm_rcom *rc)
{
dlm_rcom_out(rc);
dlm_lowcomms_commit_msg(msg);
dlm_lowcomms_put_msg(msg);
}
@ -127,10 +125,10 @@ static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
{
struct rcom_config *rf = (struct rcom_config *) rc->rc_buf;
if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) {
if ((le32_to_cpu(rc->rc_header.h_version) & 0xFFFF0000) != DLM_HEADER_MAJOR) {
log_error(ls, "version mismatch: %x nodeid %d: %x",
DLM_HEADER_MAJOR | DLM_HEADER_MINOR, nodeid,
rc->rc_header.h_version);
le32_to_cpu(rc->rc_header.h_version));
return -EPROTO;
}
@ -145,10 +143,10 @@ static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
return 0;
}
static void allow_sync_reply(struct dlm_ls *ls, uint64_t *new_seq)
static void allow_sync_reply(struct dlm_ls *ls, __le64 *new_seq)
{
spin_lock(&ls->ls_rcom_spin);
*new_seq = ++ls->ls_rcom_seq;
*new_seq = cpu_to_le64(++ls->ls_rcom_seq);
set_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
spin_unlock(&ls->ls_rcom_spin);
}
@ -182,7 +180,7 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags)
if (nodeid == dlm_our_nodeid()) {
rc = ls->ls_recover_buf;
rc->rc_result = dlm_recover_status(ls);
rc->rc_result = cpu_to_le32(dlm_recover_status(ls));
goto out;
}
@ -208,7 +206,7 @@ retry:
rc = ls->ls_recover_buf;
if (rc->rc_result == -ESRCH) {
if (rc->rc_result == cpu_to_le32(-ESRCH)) {
/* we pretend the remote lockspace exists with 0 status */
log_debug(ls, "remote node %d not ready", nodeid);
rc->rc_result = 0;
@ -227,7 +225,7 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
struct dlm_rcom *rc;
struct rcom_status *rs;
uint32_t status;
int nodeid = rc_in->rc_header.h_nodeid;
int nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid);
int len = sizeof(struct rcom_config);
struct dlm_msg *msg;
int num_slots = 0;
@ -259,7 +257,7 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
rc->rc_id = rc_in->rc_id;
rc->rc_seq_reply = rc_in->rc_seq;
rc->rc_result = status;
rc->rc_result = cpu_to_le32(status);
set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, num_slots);
@ -287,14 +285,16 @@ static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
spin_lock(&ls->ls_rcom_spin);
if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) ||
rc_in->rc_id != ls->ls_rcom_seq) {
le64_to_cpu(rc_in->rc_id) != ls->ls_rcom_seq) {
log_debug(ls, "reject reply %d from %d seq %llx expect %llx",
rc_in->rc_type, rc_in->rc_header.h_nodeid,
(unsigned long long)rc_in->rc_id,
le32_to_cpu(rc_in->rc_type),
le32_to_cpu(rc_in->rc_header.h_nodeid),
(unsigned long long)le64_to_cpu(rc_in->rc_id),
(unsigned long long)ls->ls_rcom_seq);
goto out;
}
memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length);
memcpy(ls->ls_recover_buf, rc_in,
le16_to_cpu(rc_in->rc_header.h_length));
set_bit(LSFL_RCOM_READY, &ls->ls_flags);
clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
wake_up(&ls->ls_wait_general);
@ -336,8 +336,9 @@ static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in)
int error, inlen, outlen, nodeid;
struct dlm_msg *msg;
nodeid = rc_in->rc_header.h_nodeid;
inlen = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid);
inlen = le16_to_cpu(rc_in->rc_header.h_length) -
sizeof(struct dlm_rcom);
outlen = DLM_MAX_APP_BUFSIZE - sizeof(struct dlm_rcom);
error = create_rcom_stateless(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen,
@ -364,7 +365,7 @@ int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid)
if (error)
goto out;
memcpy(rc->rc_buf, r->res_name, r->res_length);
rc->rc_id = (unsigned long) r->res_id;
rc->rc_id = cpu_to_le64(r->res_id);
send_rcom(mh, rc);
out:
@ -375,11 +376,12 @@ static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
struct dlm_rcom *rc;
struct dlm_mhandle *mh;
int error, ret_nodeid, nodeid = rc_in->rc_header.h_nodeid;
int len = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
int error, ret_nodeid, nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid);
int len = le16_to_cpu(rc_in->rc_header.h_length) -
sizeof(struct dlm_rcom);
/* Old code would send this special id to trigger a debug dump. */
if (rc_in->rc_id == 0xFFFFFFFF) {
if (rc_in->rc_id == cpu_to_le64(0xFFFFFFFF)) {
log_error(ls, "receive_rcom_lookup dump from %d", nodeid);
dlm_dump_rsb_name(ls, rc_in->rc_buf, len);
return;
@ -393,7 +395,7 @@ static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
DLM_LU_RECOVER_MASTER, &ret_nodeid, NULL);
if (error)
ret_nodeid = error;
rc->rc_result = ret_nodeid;
rc->rc_result = cpu_to_le32(ret_nodeid);
rc->rc_id = rc_in->rc_id;
rc->rc_seq_reply = rc_in->rc_seq;
@ -452,7 +454,7 @@ int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
rl = (struct rcom_lock *) rc->rc_buf;
pack_rcom_lock(r, lkb, rl);
rc->rc_id = (unsigned long) r;
rc->rc_id = cpu_to_le64((uintptr_t)r);
send_rcom(mh, rc);
out:
@ -464,7 +466,7 @@ static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in)
{
struct dlm_rcom *rc;
struct dlm_mhandle *mh;
int error, nodeid = rc_in->rc_header.h_nodeid;
int error, nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid);
dlm_recover_master_copy(ls, rc_in);
@ -500,21 +502,20 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
rc = (struct dlm_rcom *) mb;
rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
rc->rc_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
rc->rc_header.u.h_lockspace = rc_in->rc_header.u.h_lockspace;
rc->rc_header.h_nodeid = dlm_our_nodeid();
rc->rc_header.h_length = mb_len;
rc->rc_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
rc->rc_header.h_length = cpu_to_le16(mb_len);
rc->rc_header.h_cmd = DLM_RCOM;
rc->rc_type = DLM_RCOM_STATUS_REPLY;
rc->rc_type = cpu_to_le32(DLM_RCOM_STATUS_REPLY);
rc->rc_id = rc_in->rc_id;
rc->rc_seq_reply = rc_in->rc_seq;
rc->rc_result = -ESRCH;
rc->rc_result = cpu_to_le32(-ESRCH);
rf = (struct rcom_config *) rc->rc_buf;
rf->rf_lvblen = cpu_to_le32(~0U);
dlm_rcom_out(rc);
dlm_midcomms_commit_mhandle(mh);
return 0;
@ -573,27 +574,27 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
uint64_t seq;
switch (rc->rc_type) {
case DLM_RCOM_STATUS_REPLY:
case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
reply = 1;
break;
case DLM_RCOM_NAMES:
case cpu_to_le32(DLM_RCOM_NAMES):
names = 1;
break;
case DLM_RCOM_NAMES_REPLY:
case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
names = 1;
reply = 1;
break;
case DLM_RCOM_LOOKUP:
case cpu_to_le32(DLM_RCOM_LOOKUP):
lookup = 1;
break;
case DLM_RCOM_LOOKUP_REPLY:
case cpu_to_le32(DLM_RCOM_LOOKUP_REPLY):
lookup = 1;
reply = 1;
break;
case DLM_RCOM_LOCK:
case cpu_to_le32(DLM_RCOM_LOCK):
lock = 1;
break;
case DLM_RCOM_LOCK_REPLY:
case cpu_to_le32(DLM_RCOM_LOCK_REPLY):
lock = 1;
reply = 1;
break;
@ -605,10 +606,10 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
seq = ls->ls_recover_seq;
spin_unlock(&ls->ls_recover_lock);
if (stop && (rc->rc_type != DLM_RCOM_STATUS))
if (stop && (rc->rc_type != cpu_to_le32(DLM_RCOM_STATUS)))
goto ignore;
if (reply && (rc->rc_seq_reply != seq))
if (reply && (le64_to_cpu(rc->rc_seq_reply) != seq))
goto ignore;
if (!(status & DLM_RS_NODES) && (names || lookup || lock))
@ -618,59 +619,60 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
goto ignore;
switch (rc->rc_type) {
case DLM_RCOM_STATUS:
case cpu_to_le32(DLM_RCOM_STATUS):
receive_rcom_status(ls, rc);
break;
case DLM_RCOM_NAMES:
case cpu_to_le32(DLM_RCOM_NAMES):
receive_rcom_names(ls, rc);
break;
case DLM_RCOM_LOOKUP:
case cpu_to_le32(DLM_RCOM_LOOKUP):
receive_rcom_lookup(ls, rc);
break;
case DLM_RCOM_LOCK:
if (rc->rc_header.h_length < lock_size)
case cpu_to_le32(DLM_RCOM_LOCK):
if (le16_to_cpu(rc->rc_header.h_length) < lock_size)
goto Eshort;
receive_rcom_lock(ls, rc);
break;
case DLM_RCOM_STATUS_REPLY:
case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
receive_sync_reply(ls, rc);
break;
case DLM_RCOM_NAMES_REPLY:
case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
receive_sync_reply(ls, rc);
break;
case DLM_RCOM_LOOKUP_REPLY:
case cpu_to_le32(DLM_RCOM_LOOKUP_REPLY):
receive_rcom_lookup_reply(ls, rc);
break;
case DLM_RCOM_LOCK_REPLY:
if (rc->rc_header.h_length < lock_size)
case cpu_to_le32(DLM_RCOM_LOCK_REPLY):
if (le16_to_cpu(rc->rc_header.h_length) < lock_size)
goto Eshort;
dlm_recover_process_copy(ls, rc);
break;
default:
log_error(ls, "receive_rcom bad type %d", rc->rc_type);
log_error(ls, "receive_rcom bad type %d",
le32_to_cpu(rc->rc_type));
}
return;
ignore:
log_limit(ls, "dlm_receive_rcom ignore msg %d "
"from %d %llu %llu recover seq %llu sts %x gen %u",
rc->rc_type,
le32_to_cpu(rc->rc_type),
nodeid,
(unsigned long long)rc->rc_seq,
(unsigned long long)rc->rc_seq_reply,
(unsigned long long)le64_to_cpu(rc->rc_seq),
(unsigned long long)le64_to_cpu(rc->rc_seq_reply),
(unsigned long long)seq,
status, ls->ls_generation);
return;
Eshort:
log_error(ls, "recovery message %d from %d is too short",
rc->rc_type, nodeid);
le32_to_cpu(rc->rc_type), nodeid);
}

View File

@ -114,7 +114,7 @@ static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
if (save_slots)
dlm_slot_save(ls, rc, memb);
if (rc->rc_result & wait_status)
if (le32_to_cpu(rc->rc_result) & wait_status)
break;
if (delay < 1000)
delay += 20;
@ -141,7 +141,7 @@ static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
if (error)
break;
if (rc->rc_result & wait_status)
if (le32_to_cpu(rc->rc_result) & wait_status)
break;
if (delay < 1000)
delay += 20;
@ -568,14 +568,14 @@ int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
struct dlm_rsb *r;
int ret_nodeid, new_master;
r = recover_idr_find(ls, rc->rc_id);
r = recover_idr_find(ls, le64_to_cpu(rc->rc_id));
if (!r) {
log_error(ls, "dlm_recover_master_reply no id %llx",
(unsigned long long)rc->rc_id);
(unsigned long long)le64_to_cpu(rc->rc_id));
goto out;
}
ret_nodeid = rc->rc_result;
ret_nodeid = le32_to_cpu(rc->rc_result);
if (ret_nodeid == dlm_our_nodeid())
new_master = 0;
@ -732,10 +732,9 @@ void dlm_recovered_lock(struct dlm_rsb *r)
static void recover_lvb(struct dlm_rsb *r)
{
struct dlm_lkb *lkb, *high_lkb = NULL;
struct dlm_lkb *big_lkb = NULL, *iter, *high_lkb = NULL;
uint32_t high_seq = 0;
int lock_lvb_exists = 0;
int big_lock_exists = 0;
int lvblen = r->res_ls->ls_lvblen;
if (!rsb_flag(r, RSB_NEW_MASTER2) &&
@ -751,37 +750,37 @@ static void recover_lvb(struct dlm_rsb *r)
/* we are the new master, so figure out if VALNOTVALID should
be set, and set the rsb lvb from the best lkb available. */
list_for_each_entry(lkb, &r->res_grantqueue, lkb_statequeue) {
if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
list_for_each_entry(iter, &r->res_grantqueue, lkb_statequeue) {
if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
continue;
lock_lvb_exists = 1;
if (lkb->lkb_grmode > DLM_LOCK_CR) {
big_lock_exists = 1;
if (iter->lkb_grmode > DLM_LOCK_CR) {
big_lkb = iter;
goto setflag;
}
if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
high_lkb = lkb;
high_seq = lkb->lkb_lvbseq;
if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
high_lkb = iter;
high_seq = iter->lkb_lvbseq;
}
}
list_for_each_entry(lkb, &r->res_convertqueue, lkb_statequeue) {
if (!(lkb->lkb_exflags & DLM_LKF_VALBLK))
list_for_each_entry(iter, &r->res_convertqueue, lkb_statequeue) {
if (!(iter->lkb_exflags & DLM_LKF_VALBLK))
continue;
lock_lvb_exists = 1;
if (lkb->lkb_grmode > DLM_LOCK_CR) {
big_lock_exists = 1;
if (iter->lkb_grmode > DLM_LOCK_CR) {
big_lkb = iter;
goto setflag;
}
if (((int)lkb->lkb_lvbseq - (int)high_seq) >= 0) {
high_lkb = lkb;
high_seq = lkb->lkb_lvbseq;
if (((int)iter->lkb_lvbseq - (int)high_seq) >= 0) {
high_lkb = iter;
high_seq = iter->lkb_lvbseq;
}
}
@ -790,7 +789,7 @@ static void recover_lvb(struct dlm_rsb *r)
goto out;
/* lvb is invalidated if only NL/CR locks remain */
if (!big_lock_exists)
if (!big_lkb)
rsb_set_flag(r, RSB_VALNOTVALID);
if (!r->res_lvbptr) {
@ -799,9 +798,9 @@ static void recover_lvb(struct dlm_rsb *r)
goto out;
}
if (big_lock_exists) {
r->res_lvbseq = lkb->lkb_lvbseq;
memcpy(r->res_lvbptr, lkb->lkb_lvbptr, lvblen);
if (big_lkb) {
r->res_lvbseq = big_lkb->lkb_lvbseq;
memcpy(r->res_lvbptr, big_lkb->lkb_lvbptr, lvblen);
} else if (high_lkb) {
r->res_lvbseq = high_lkb->lkb_lvbseq;
memcpy(r->res_lvbptr, high_lkb->lkb_lvbptr, lvblen);

View File

@ -14,6 +14,7 @@
#include "dir.h"
#include "config.h"
#include "requestqueue.h"
#include "util.h"
struct rq_entry {
struct list_head list;
@ -32,7 +33,8 @@ struct rq_entry {
void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
{
struct rq_entry *e;
int length = ms->m_header.h_length - sizeof(struct dlm_message);
int length = le16_to_cpu(ms->m_header.h_length) -
sizeof(struct dlm_message);
e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
if (!e) {
@ -42,7 +44,7 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF;
e->nodeid = nodeid;
memcpy(&e->request, ms, ms->m_header.h_length);
memcpy(&e->request, ms, le16_to_cpu(ms->m_header.h_length));
atomic_inc(&ls->ls_requestqueue_cnt);
mutex_lock(&ls->ls_requestqueue_mutex);
@ -82,8 +84,10 @@ int dlm_process_requestqueue(struct dlm_ls *ls)
log_limit(ls, "dlm_process_requestqueue msg %d from %d "
"lkid %x remid %x result %d seq %u",
ms->m_type, ms->m_header.h_nodeid,
ms->m_lkid, ms->m_remid, ms->m_result,
le32_to_cpu(ms->m_type),
le32_to_cpu(ms->m_header.h_nodeid),
le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
from_dlm_errno(le32_to_cpu(ms->m_result)),
e->recover_seq);
dlm_receive_message_saved(ls, &e->request, e->recover_seq);
@ -124,7 +128,7 @@ void dlm_wait_requestqueue(struct dlm_ls *ls)
static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
{
uint32_t type = ms->m_type;
__le32 type = ms->m_type;
/* the ls is being cleaned up and freed by release_lockspace */
if (!atomic_read(&ls->ls_count))
@ -136,9 +140,9 @@ static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
/* directory operations are always purged because the directory is
always rebuilt during recovery and the lookups resent */
if (type == DLM_MSG_REMOVE ||
type == DLM_MSG_LOOKUP ||
type == DLM_MSG_LOOKUP_REPLY)
if (type == cpu_to_le32(DLM_MSG_REMOVE) ||
type == cpu_to_le32(DLM_MSG_LOOKUP) ||
type == cpu_to_le32(DLM_MSG_LOOKUP_REPLY))
return 1;
if (!dlm_no_directory(ls))

View File

@ -108,11 +108,11 @@ static void compat_input(struct dlm_write_request *kb,
kb->i.lock.parent = kb32->i.lock.parent;
kb->i.lock.xid = kb32->i.lock.xid;
kb->i.lock.timeout = kb32->i.lock.timeout;
kb->i.lock.castparam = (void *)(long)kb32->i.lock.castparam;
kb->i.lock.castaddr = (void *)(long)kb32->i.lock.castaddr;
kb->i.lock.bastparam = (void *)(long)kb32->i.lock.bastparam;
kb->i.lock.bastaddr = (void *)(long)kb32->i.lock.bastaddr;
kb->i.lock.lksb = (void *)(long)kb32->i.lock.lksb;
kb->i.lock.castparam = (__user void *)(long)kb32->i.lock.castparam;
kb->i.lock.castaddr = (__user void *)(long)kb32->i.lock.castaddr;
kb->i.lock.bastparam = (__user void *)(long)kb32->i.lock.bastparam;
kb->i.lock.bastaddr = (__user void *)(long)kb32->i.lock.bastaddr;
kb->i.lock.lksb = (__user void *)(long)kb32->i.lock.lksb;
memcpy(kb->i.lock.lvb, kb32->i.lock.lvb, DLM_USER_LVB_LEN);
memcpy(kb->i.lock.name, kb32->i.lock.name, namelen);
}
@ -127,9 +127,9 @@ static void compat_output(struct dlm_lock_result *res,
res32->version[1] = res->version[1];
res32->version[2] = res->version[2];
res32->user_astaddr = (__u32)(long)res->user_astaddr;
res32->user_astparam = (__u32)(long)res->user_astparam;
res32->user_lksb = (__u32)(long)res->user_lksb;
res32->user_astaddr = (__u32)(__force long)res->user_astaddr;
res32->user_astparam = (__u32)(__force long)res->user_astparam;
res32->user_lksb = (__u32)(__force long)res->user_lksb;
res32->bast_mode = res->bast_mode;
res32->lvb_offset = res->lvb_offset;

View File

@ -20,28 +20,10 @@
#define DLM_ERRNO_ETIMEDOUT 110
#define DLM_ERRNO_EINPROGRESS 115
void header_out(struct dlm_header *hd)
{
hd->h_version = cpu_to_le32(hd->h_version);
/* does it for others u32 in union as well */
hd->u.h_lockspace = cpu_to_le32(hd->u.h_lockspace);
hd->h_nodeid = cpu_to_le32(hd->h_nodeid);
hd->h_length = cpu_to_le16(hd->h_length);
}
void header_in(struct dlm_header *hd)
{
hd->h_version = le32_to_cpu(hd->h_version);
/* does it for others u32 in union as well */
hd->u.h_lockspace = le32_to_cpu(hd->u.h_lockspace);
hd->h_nodeid = le32_to_cpu(hd->h_nodeid);
hd->h_length = le16_to_cpu(hd->h_length);
}
/* higher errno values are inconsistent across architectures, so select
one set of values for on the wire */
static int to_dlm_errno(int err)
int to_dlm_errno(int err)
{
switch (err) {
case -EDEADLK:
@ -62,7 +44,7 @@ static int to_dlm_errno(int err)
return err;
}
static int from_dlm_errno(int err)
int from_dlm_errno(int err)
{
switch (err) {
case -DLM_ERRNO_EDEADLK:
@ -82,73 +64,3 @@ static int from_dlm_errno(int err)
}
return err;
}
void dlm_message_out(struct dlm_message *ms)
{
header_out(&ms->m_header);
ms->m_type = cpu_to_le32(ms->m_type);
ms->m_nodeid = cpu_to_le32(ms->m_nodeid);
ms->m_pid = cpu_to_le32(ms->m_pid);
ms->m_lkid = cpu_to_le32(ms->m_lkid);
ms->m_remid = cpu_to_le32(ms->m_remid);
ms->m_parent_lkid = cpu_to_le32(ms->m_parent_lkid);
ms->m_parent_remid = cpu_to_le32(ms->m_parent_remid);
ms->m_exflags = cpu_to_le32(ms->m_exflags);
ms->m_sbflags = cpu_to_le32(ms->m_sbflags);
ms->m_flags = cpu_to_le32(ms->m_flags);
ms->m_lvbseq = cpu_to_le32(ms->m_lvbseq);
ms->m_hash = cpu_to_le32(ms->m_hash);
ms->m_status = cpu_to_le32(ms->m_status);
ms->m_grmode = cpu_to_le32(ms->m_grmode);
ms->m_rqmode = cpu_to_le32(ms->m_rqmode);
ms->m_bastmode = cpu_to_le32(ms->m_bastmode);
ms->m_asts = cpu_to_le32(ms->m_asts);
ms->m_result = cpu_to_le32(to_dlm_errno(ms->m_result));
}
void dlm_message_in(struct dlm_message *ms)
{
header_in(&ms->m_header);
ms->m_type = le32_to_cpu(ms->m_type);
ms->m_nodeid = le32_to_cpu(ms->m_nodeid);
ms->m_pid = le32_to_cpu(ms->m_pid);
ms->m_lkid = le32_to_cpu(ms->m_lkid);
ms->m_remid = le32_to_cpu(ms->m_remid);
ms->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid);
ms->m_parent_remid = le32_to_cpu(ms->m_parent_remid);
ms->m_exflags = le32_to_cpu(ms->m_exflags);
ms->m_sbflags = le32_to_cpu(ms->m_sbflags);
ms->m_flags = le32_to_cpu(ms->m_flags);
ms->m_lvbseq = le32_to_cpu(ms->m_lvbseq);
ms->m_hash = le32_to_cpu(ms->m_hash);
ms->m_status = le32_to_cpu(ms->m_status);
ms->m_grmode = le32_to_cpu(ms->m_grmode);
ms->m_rqmode = le32_to_cpu(ms->m_rqmode);
ms->m_bastmode = le32_to_cpu(ms->m_bastmode);
ms->m_asts = le32_to_cpu(ms->m_asts);
ms->m_result = from_dlm_errno(le32_to_cpu(ms->m_result));
}
void dlm_rcom_out(struct dlm_rcom *rc)
{
header_out(&rc->rc_header);
rc->rc_type = cpu_to_le32(rc->rc_type);
rc->rc_result = cpu_to_le32(rc->rc_result);
rc->rc_id = cpu_to_le64(rc->rc_id);
rc->rc_seq = cpu_to_le64(rc->rc_seq);
rc->rc_seq_reply = cpu_to_le64(rc->rc_seq_reply);
}
void dlm_rcom_in(struct dlm_rcom *rc)
{
header_in(&rc->rc_header);
rc->rc_type = le32_to_cpu(rc->rc_type);
rc->rc_result = le32_to_cpu(rc->rc_result);
rc->rc_id = le64_to_cpu(rc->rc_id);
rc->rc_seq = le64_to_cpu(rc->rc_seq);
rc->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply);
}

View File

@ -11,12 +11,8 @@
#ifndef __UTIL_DOT_H__
#define __UTIL_DOT_H__
void dlm_message_out(struct dlm_message *ms);
void dlm_message_in(struct dlm_message *ms);
void dlm_rcom_out(struct dlm_rcom *rc);
void dlm_rcom_in(struct dlm_rcom *rc);
void header_out(struct dlm_header *hd);
void header_in(struct dlm_header *hd);
int to_dlm_errno(int err);
int from_dlm_errno(int err);
#endif