gfs2: Remove LM_FLAG_PRIORITY flag
The last user of this flag was removed in commit b77b4a4815 ("gfs2: Rework freeze / thaw logic").
Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
This commit is contained in: parent de3e7f97ae, commit 0b93bac227.
4 changed files with 7 additions and 33 deletions.
|
@@ -20,8 +20,7 @@ The gl_holders list contains all the queued lock requests (not
|
||||||
just the holders) associated with the glock. If there are any
|
just the holders) associated with the glock. If there are any
|
||||||
held locks, then they will be contiguous entries at the head
|
held locks, then they will be contiguous entries at the head
|
||||||
of the list. Locks are granted in strictly the order that they
|
of the list. Locks are granted in strictly the order that they
|
||||||
are queued, except for those marked LM_FLAG_PRIORITY which are
|
are queued.
|
||||||
used only during recovery, and even then only for journal locks.
|
|
||||||
|
|
||||||
There are three lock states that users of the glock layer can request,
|
There are three lock states that users of the glock layer can request,
|
||||||
namely shared (SH), deferred (DF) and exclusive (EX). Those translate
|
namely shared (SH), deferred (DF) and exclusive (EX). Those translate
|
||||||
|
|
|
@@ -591,8 +591,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
|
||||||
if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
|
if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
|
||||||
/* move to back of queue and try next entry */
|
/* move to back of queue and try next entry */
|
||||||
if (ret & LM_OUT_CANCELED) {
|
if (ret & LM_OUT_CANCELED) {
|
||||||
if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
|
list_move_tail(&gh->gh_list, &gl->gl_holders);
|
||||||
list_move_tail(&gh->gh_list, &gl->gl_holders);
|
|
||||||
gh = find_first_waiter(gl);
|
gh = find_first_waiter(gl);
|
||||||
gl->gl_target = gh->gh_state;
|
gl->gl_target = gh->gh_state;
|
||||||
goto retry;
|
goto retry;
|
||||||
|
@@ -679,8 +678,7 @@ __acquires(&gl->gl_lockref.lock)
|
||||||
gh && !(gh->gh_flags & LM_FLAG_NOEXP))
|
gh && !(gh->gh_flags & LM_FLAG_NOEXP))
|
||||||
goto skip_inval;
|
goto skip_inval;
|
||||||
|
|
||||||
lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
|
lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP);
|
||||||
LM_FLAG_PRIORITY);
|
|
||||||
GLOCK_BUG_ON(gl, gl->gl_state == target);
|
GLOCK_BUG_ON(gl, gl->gl_state == target);
|
||||||
GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
|
GLOCK_BUG_ON(gl, gl->gl_state == gl->gl_target);
|
||||||
if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
|
if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
|
||||||
|
@@ -1515,27 +1513,20 @@ fail:
|
||||||
}
|
}
|
||||||
if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
|
if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
|
||||||
continue;
|
continue;
|
||||||
if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
|
|
||||||
insert_pt = &gh2->gh_list;
|
|
||||||
}
|
}
|
||||||
trace_gfs2_glock_queue(gh, 1);
|
trace_gfs2_glock_queue(gh, 1);
|
||||||
gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
|
gfs2_glstats_inc(gl, GFS2_LKS_QCOUNT);
|
||||||
gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
|
gfs2_sbstats_inc(gl, GFS2_LKS_QCOUNT);
|
||||||
if (likely(insert_pt == NULL)) {
|
if (likely(insert_pt == NULL)) {
|
||||||
list_add_tail(&gh->gh_list, &gl->gl_holders);
|
list_add_tail(&gh->gh_list, &gl->gl_holders);
|
||||||
if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
|
|
||||||
goto do_cancel;
|
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
list_add_tail(&gh->gh_list, insert_pt);
|
list_add_tail(&gh->gh_list, insert_pt);
|
||||||
do_cancel:
|
|
||||||
gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
|
gh = list_first_entry(&gl->gl_holders, struct gfs2_holder, gh_list);
|
||||||
if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
|
spin_unlock(&gl->gl_lockref.lock);
|
||||||
spin_unlock(&gl->gl_lockref.lock);
|
if (sdp->sd_lockstruct.ls_ops->lm_cancel)
|
||||||
if (sdp->sd_lockstruct.ls_ops->lm_cancel)
|
sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
|
||||||
sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
|
spin_lock(&gl->gl_lockref.lock);
|
||||||
spin_lock(&gl->gl_lockref.lock);
|
|
||||||
}
|
|
||||||
return;
|
return;
|
||||||
|
|
||||||
trap_recursive:
|
trap_recursive:
|
||||||
|
@@ -2227,8 +2218,6 @@ static const char *hflags2str(char *buf, u16 flags, unsigned long iflags)
|
||||||
*p++ = 'e';
|
*p++ = 'e';
|
||||||
if (flags & LM_FLAG_ANY)
|
if (flags & LM_FLAG_ANY)
|
||||||
*p++ = 'A';
|
*p++ = 'A';
|
||||||
if (flags & LM_FLAG_PRIORITY)
|
|
||||||
*p++ = 'p';
|
|
||||||
if (flags & LM_FLAG_NODE_SCOPE)
|
if (flags & LM_FLAG_NODE_SCOPE)
|
||||||
*p++ = 'n';
|
*p++ = 'n';
|
||||||
if (flags & GL_ASYNC)
|
if (flags & GL_ASYNC)
|
||||||
|
|
|
@@ -68,14 +68,6 @@ enum {
|
||||||
* also be granted in SHARED. The preferred state is whichever is compatible
|
* also be granted in SHARED. The preferred state is whichever is compatible
|
||||||
* with other granted locks, or the specified state if no other locks exist.
|
* with other granted locks, or the specified state if no other locks exist.
|
||||||
*
|
*
|
||||||
* LM_FLAG_PRIORITY
|
|
||||||
* Override fairness considerations. Suppose a lock is held in a shared state
|
|
||||||
* and there is a pending request for the deferred state. A shared lock
|
|
||||||
* request with the priority flag would be allowed to bypass the deferred
|
|
||||||
* request and directly join the other shared lock. A shared lock request
|
|
||||||
* without the priority flag might be forced to wait until the deferred
|
|
||||||
* requested had acquired and released the lock.
|
|
||||||
*
|
|
||||||
* LM_FLAG_NODE_SCOPE
|
* LM_FLAG_NODE_SCOPE
|
||||||
* This holder agrees to share the lock within this node. In other words,
|
* This holder agrees to share the lock within this node. In other words,
|
||||||
* the glock is held in EX mode according to DLM, but local holders on the
|
* the glock is held in EX mode according to DLM, but local holders on the
|
||||||
|
@@ -86,7 +78,6 @@ enum {
|
||||||
#define LM_FLAG_TRY_1CB 0x0002
|
#define LM_FLAG_TRY_1CB 0x0002
|
||||||
#define LM_FLAG_NOEXP 0x0004
|
#define LM_FLAG_NOEXP 0x0004
|
||||||
#define LM_FLAG_ANY 0x0008
|
#define LM_FLAG_ANY 0x0008
|
||||||
#define LM_FLAG_PRIORITY 0x0010
|
|
||||||
#define LM_FLAG_NODE_SCOPE 0x0020
|
#define LM_FLAG_NODE_SCOPE 0x0020
|
||||||
#define GL_ASYNC 0x0040
|
#define GL_ASYNC 0x0040
|
||||||
#define GL_EXACT 0x0080
|
#define GL_EXACT 0x0080
|
||||||
|
|
|
@@ -222,11 +222,6 @@ static u32 make_flags(struct gfs2_glock *gl, const unsigned int gfs_flags,
|
||||||
lkf |= DLM_LKF_NOQUEUEBAST;
|
lkf |= DLM_LKF_NOQUEUEBAST;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (gfs_flags & LM_FLAG_PRIORITY) {
|
|
||||||
lkf |= DLM_LKF_NOORDER;
|
|
||||||
lkf |= DLM_LKF_HEADQUE;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (gfs_flags & LM_FLAG_ANY) {
|
if (gfs_flags & LM_FLAG_ANY) {
|
||||||
if (req == DLM_LOCK_PR)
|
if (req == DLM_LOCK_PR)
|
||||||
lkf |= DLM_LKF_ALTCW;
|
lkf |= DLM_LKF_ALTCW;
|
||||||
|
|
Loading…
Add table
Reference in a new issue