author | David Teigland <teigland@redhat.com> | 2006-01-18 09:30:29 +0000
committer | Steven Whitehouse <swhiteho@redhat.com> | 2006-01-18 09:30:29 +0000
commit | e7fd41792fc0ee52a05fcaac87511f118328d147 (patch)
tree | eee5227088ba97daef795e385b7548d2a1cc4cb6 /fs/dlm/requestqueue.c
parent | e47314207032cfd1157b8c377df162839b32ea6f (diff)
download | lwn-e7fd41792fc0ee52a05fcaac87511f118328d147.tar.gz lwn-e7fd41792fc0ee52a05fcaac87511f118328d147.zip
[DLM] The core of the DLM for GFS2/CLVM
This is the core of the distributed lock manager which is required
to use GFS2 as a cluster filesystem. It is also used by CLVM and
can be used as a standalone lock manager independently of either
of these two projects.
It implements VAX-style locking modes.
Signed-off-by: David Teigland <teigland@redhat.com>
Signed-off-by: Steve Whitehouse <swhiteho@redhat.com>
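
[Editor's note: the "VAX-style locking modes" mentioned above are the six lock modes of the classic VMS distributed lock manager, which the DLM exposes as DLM_LOCK_NL, DLM_LOCK_CR, DLM_LOCK_CW, DLM_LOCK_PR, DLM_LOCK_PW and DLM_LOCK_EX. For reference, here is a minimal sketch of the conventional compatibility matrix for these modes; the constant values match the DLM's public header, but the matrix as written here is an illustrative reconstruction, not code from this patch.]

/* Illustrative sketch (not from this patch): the six VAX-style lock
   modes and their conventional compatibility matrix.  A 1 means a
   lock in the row mode can be granted while another holder has a
   lock in the column mode. */

#define DLM_LOCK_NL	0	/* null */
#define DLM_LOCK_CR	1	/* concurrent read */
#define DLM_LOCK_CW	2	/* concurrent write */
#define DLM_LOCK_PR	3	/* protected read */
#define DLM_LOCK_PW	4	/* protected write */
#define DLM_LOCK_EX	5	/* exclusive */

static const int modes_compat[6][6] = {
	        /* NL  CR  CW  PR  PW  EX */
	/* NL */ { 1,  1,  1,  1,  1,  1 },
	/* CR */ { 1,  1,  1,  1,  1,  0 },
	/* CW */ { 1,  1,  1,  0,  0,  0 },
	/* PR */ { 1,  1,  0,  1,  0,  0 },
	/* PW */ { 1,  1,  0,  0,  0,  0 },
	/* EX */ { 1,  0,  0,  0,  0,  0 },
};

PR and PW correspond to the familiar shared and update locks; CR and CW are the weaker modes that let readers and writers coexist.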
Diffstat (limited to 'fs/dlm/requestqueue.c')
-rw-r--r-- | fs/dlm/requestqueue.c | 184
1 file changed, 184 insertions, 0 deletions
diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
new file mode 100644
index 000000000000..36afe99e4f93
--- /dev/null
+++ b/fs/dlm/requestqueue.c
@@ -0,0 +1,184 @@
+/******************************************************************************
+*******************************************************************************
+**
+**  Copyright (C) 2005 Red Hat, Inc.  All rights reserved.
+**
+**  This copyrighted material is made available to anyone wishing to use,
+**  modify, copy, or redistribute it subject to the terms and conditions
+**  of the GNU General Public License v.2.
+**
+*******************************************************************************
+******************************************************************************/
+
+#include "dlm_internal.h"
+#include "member.h"
+#include "lock.h"
+#include "dir.h"
+#include "config.h"
+#include "requestqueue.h"
+
+struct rq_entry {
+	struct list_head list;
+	int nodeid;
+	char request[1];
+};
+
+/*
+ * Requests received while the lockspace is in recovery get added to the
+ * request queue and processed when recovery is complete.  This happens when
+ * the lockspace is suspended on some nodes before it is on others, or the
+ * lockspace is enabled on some while still suspended on others.
+ */
+
+void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_header *hd)
+{
+	struct rq_entry *e;
+	int length = hd->h_length;
+
+	if (dlm_is_removed(ls, nodeid))
+		return;
+
+	e = kmalloc(sizeof(struct rq_entry) + length, GFP_KERNEL);
+	if (!e) {
+		log_print("dlm_add_requestqueue: out of memory\n");
+		return;
+	}
+
+	e->nodeid = nodeid;
+	memcpy(e->request, hd, length);
+
+	down(&ls->ls_requestqueue_lock);
+	list_add_tail(&e->list, &ls->ls_requestqueue);
+	up(&ls->ls_requestqueue_lock);
+}
+
+int dlm_process_requestqueue(struct dlm_ls *ls)
+{
+	struct rq_entry *e;
+	struct dlm_header *hd;
+	int error = 0;
+
+	down(&ls->ls_requestqueue_lock);
+
+	for (;;) {
+		if (list_empty(&ls->ls_requestqueue)) {
+			up(&ls->ls_requestqueue_lock);
+			error = 0;
+			break;
+		}
+		e = list_entry(ls->ls_requestqueue.next, struct rq_entry, list);
+		up(&ls->ls_requestqueue_lock);
+
+		hd = (struct dlm_header *) e->request;
+		error = dlm_receive_message(hd, e->nodeid, TRUE);
+
+		if (error == -EINTR) {
+			/* entry is left on requestqueue */
+			log_debug(ls, "process_requestqueue abort eintr");
+			break;
+		}
+
+		down(&ls->ls_requestqueue_lock);
+		list_del(&e->list);
+		kfree(e);
+
+		if (dlm_locking_stopped(ls)) {
+			log_debug(ls, "process_requestqueue abort running");
+			up(&ls->ls_requestqueue_lock);
+			error = -EINTR;
+			break;
+		}
+		schedule();
+	}
+
+	return error;
+}
+
+/*
+ * After recovery is done, locking is resumed and dlm_recoverd takes all the
+ * saved requests and processes them as they would have been by dlm_recvd.  At
+ * the same time, dlm_recvd will start receiving new requests from remote
+ * nodes.  We want to delay dlm_recvd processing new requests until
+ * dlm_recoverd has finished processing the old saved requests.
+ */
+
+void dlm_wait_requestqueue(struct dlm_ls *ls)
+{
+	for (;;) {
+		down(&ls->ls_requestqueue_lock);
+		if (list_empty(&ls->ls_requestqueue))
+			break;
+		if (dlm_locking_stopped(ls))
+			break;
+		up(&ls->ls_requestqueue_lock);
+		schedule();
+	}
+	up(&ls->ls_requestqueue_lock);
+}
+
+static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
+{
+	uint32_t type = ms->m_type;
+
+	if (dlm_is_removed(ls, nodeid))
+		return 1;
+
+	/* directory operations are always purged because the directory is
+	   always rebuilt during recovery and the lookups resent */
+
+	if (type == DLM_MSG_REMOVE ||
+	    type == DLM_MSG_LOOKUP ||
+	    type == DLM_MSG_LOOKUP_REPLY)
+		return 1;
+
+	if (!dlm_no_directory(ls))
+		return 0;
+
+	/* with no directory, the master is likely to change as a part of
+	   recovery; requests to/from the defunct master need to be purged */
+
+	switch (type) {
+	case DLM_MSG_REQUEST:
+	case DLM_MSG_CONVERT:
+	case DLM_MSG_UNLOCK:
+	case DLM_MSG_CANCEL:
+		/* we're no longer the master of this resource, the sender
+		   will resend to the new master (see waiter_needs_recovery) */
+
+		if (dlm_hash2nodeid(ls, ms->m_hash) != dlm_our_nodeid())
+			return 1;
+		break;
+
+	case DLM_MSG_REQUEST_REPLY:
+	case DLM_MSG_CONVERT_REPLY:
+	case DLM_MSG_UNLOCK_REPLY:
+	case DLM_MSG_CANCEL_REPLY:
+	case DLM_MSG_GRANT:
+		/* this reply is from the former master of the resource,
+		   we'll resend to the new master if needed */
+
+		if (dlm_hash2nodeid(ls, ms->m_hash) != nodeid)
+			return 1;
+		break;
+	}
+
+	return 0;
+}
+
+void dlm_purge_requestqueue(struct dlm_ls *ls)
+{
+	struct dlm_message *ms;
+	struct rq_entry *e, *safe;
+
+	down(&ls->ls_requestqueue_lock);
+	list_for_each_entry_safe(e, safe, &ls->ls_requestqueue, list) {
+		ms = (struct dlm_message *) e->request;
+
+		if (purge_request(ls, ms, e->nodeid)) {
+			list_del(&e->list);
+			kfree(e);
+		}
+	}
+	up(&ls->ls_requestqueue_lock);
+}
+
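
[Editor's note: to make the intended flow of the code above concrete, the sketch below shows how the two daemons it mentions might drive these entry points. Only dlm_add_requestqueue, dlm_wait_requestqueue, dlm_purge_requestqueue, dlm_process_requestqueue and dlm_receive_message come from this patch; the two wrapper functions, and the FALSE argument on the normal delivery path, are hypothetical illustration of the sequencing described in the comments, not the patch's actual call sites.]

#include "dlm_internal.h"
#include "lock.h"
#include "requestqueue.h"

/* Hypothetical illustration only; the real call sites live in
   dlm_recvd's receive path and in dlm_recoverd. */

/* dlm_recvd side: park messages while the lockspace is suspended,
   otherwise let dlm_recoverd drain the saved backlog before
   delivering anything new */
static void recvd_deliver(struct dlm_ls *ls, struct dlm_header *hd,
			  int nodeid)
{
	if (dlm_locking_stopped(ls)) {
		/* recovery in progress: save the message for later */
		dlm_add_requestqueue(ls, nodeid, hd);
		return;
	}

	/* recovery done: block until the saved requests have been
	   replayed, so new messages never overtake old ones */
	dlm_wait_requestqueue(ls);
	dlm_receive_message(hd, nodeid, FALSE);
}

/* dlm_recoverd side: once recovery completes, discard saved messages
   that recovery made meaningless, then replay the rest */
static int recoverd_drain(struct dlm_ls *ls)
{
	dlm_purge_requestqueue(ls);

	/* -EINTR means locking stopped again mid-replay; the remaining
	   entries stay queued for the next recovery pass */
	return dlm_process_requestqueue(ls);
}

The point of the pairing is the ordering guarantee stated in the comments: a request saved during recovery is always processed before any request that arrives after locking resumes, so remote nodes observe the same message order they would have seen had the lockspace never been suspended.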