drbd: renamed drbd_tl_epoch.n_req to drbd_tl_epoch.n_writes

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
Philipp Reisner, 2010-05-27 14:49:27 +02:00
commit 7e602c0aaf, parent 504c6d1b44
3 changed files with 9 additions and 9 deletions

drivers/block/drbd/drbd_int.h

@@ -697,7 +697,7 @@ struct drbd_tl_epoch {
 	struct list_head requests; /* requests before */
 	struct drbd_tl_epoch *next; /* pointer to the next barrier */
 	unsigned int br_number;  /* the barriers identifier. */
-	int n_req;	/* number of requests attached before this barrier */
+	int n_writes;	/* number of requests attached before this barrier */
 };
 
 struct drbd_request;
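
For orientation: epochs form a singly linked chain from mdev->oldest_tle to mdev->newest_tle (see the tl_* hunks below), and the renamed field counts the write requests attached to each epoch before its barrier. A small illustrative walk over such a chain, using a pared-down stand-in type (epoch_sketch) rather than the real struct drbd_tl_epoch:

```c
/* Sketch only: "epoch_sketch" and "total_pending_writes" are made-up names
 * for illustration; they are not part of the DRBD driver. */
struct epoch_sketch {
	struct epoch_sketch *next;	/* pointer to the next barrier/epoch */
	unsigned int br_number;		/* the barrier's identifier */
	int n_writes;			/* writes attached before this barrier */
};

/* Sum the writes still tracked between the oldest and the newest epoch. */
static int total_pending_writes(const struct epoch_sketch *oldest)
{
	const struct epoch_sketch *e;
	int sum = 0;

	for (e = oldest; e != NULL; e = e->next)
		sum += e->n_writes;
	return sum;
}
```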

drivers/block/drbd/drbd_main.c

@@ -199,7 +199,7 @@ static int tl_init(struct drbd_conf *mdev)
 	INIT_LIST_HEAD(&b->w.list);
 	b->next = NULL;
 	b->br_number = 4711;
-	b->n_req = 0;
+	b->n_writes = 0;
 	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
 
 	mdev->oldest_tle = b;
@@ -240,7 +240,7 @@ void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
 	INIT_LIST_HEAD(&new->w.list);
 	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
 	new->next = NULL;
-	new->n_req = 0;
+	new->n_writes = 0;
 
 	newest_before = mdev->newest_tle;
 	/* never send a barrier number == 0, because that is special-cased
@@ -284,9 +284,9 @@ void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
 			barrier_nr, b->br_number);
 		goto bail;
 	}
-	if (b->n_req != set_size) {
-		dev_err(DEV, "BAD! BarrierAck #%u received with n_req=%u, expected n_req=%u!\n",
-			barrier_nr, set_size, b->n_req);
+	if (b->n_writes != set_size) {
+		dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
+			barrier_nr, set_size, b->n_writes);
 		goto bail;
 	}
 
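
The hunk above is a consistency check: the peer's BarrierAck reports how many writes it completed for the epoch (set_size), and that must match the n_writes recorded locally when the requests were queued. A hedged, self-contained sketch of the same idea, using a hypothetical name (ack_matches_epoch) and plain stderr instead of dev_err():

```c
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the check in tl_release(); not driver code. */
static bool ack_matches_epoch(unsigned int barrier_nr, unsigned int set_size,
			      int local_n_writes)
{
	if ((int)set_size != local_n_writes) {
		fprintf(stderr,
			"BarrierAck #%u: got n_writes=%u, expected %d\n",
			barrier_nr, set_size, local_n_writes);
		return false;	/* caller treats this as a protocol error */
	}
	return true;
}
```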
@@ -378,7 +378,7 @@ void tl_clear(struct drbd_conf *mdev)
 			INIT_LIST_HEAD(&b->w.list);
 			b->w.cb = NULL;
 			b->br_number = new_initial_bnr;
-			b->n_req = 0;
+			b->n_writes = 0;
 
 			mdev->oldest_tle = b;
 			break;

drivers/block/drbd/drbd_req.c

@@ -521,7 +521,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
 			      &mdev->newest_tle->requests);
 
 		/* increment size of current epoch */
-		mdev->newest_tle->n_req++;
+		mdev->newest_tle->n_writes++;
 
 		/* queue work item to send data */
 		D_ASSERT(req->rq_state & RQ_NET_PENDING);
@@ -530,7 +530,7 @@ void __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		drbd_queue_work(&mdev->data.work, &req->w);
 
 		/* close the epoch, in case it outgrew the limit */
-		if (mdev->newest_tle->n_req >= mdev->net_conf->max_epoch_size)
+		if (mdev->newest_tle->n_writes >= mdev->net_conf->max_epoch_size)
 			queue_barrier(mdev);
 
 		break;
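
Tying the two __req_mod() hunks together: each write queued for the network bumps the newest epoch's n_writes, and once the count reaches the configured max_epoch_size a barrier is queued to close the epoch. A minimal sketch of that accounting, assuming made-up names (epoch_sketch, queue_write) and a fixed limit in place of net_conf->max_epoch_size:

```c
#include <stdbool.h>

#define MAX_EPOCH_SIZE 2048	/* stand-in for net_conf->max_epoch_size */

struct epoch_sketch {		/* redeclared so this sketch stands alone */
	int n_writes;		/* writes attached before this barrier */
};

/* Attach one write to the newest epoch; return true when the caller
 * should queue a barrier to close the epoch (cf. queue_barrier()). */
static bool queue_write(struct epoch_sketch *newest)
{
	newest->n_writes++;
	return newest->n_writes >= MAX_EPOCH_SIZE;
}
```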