Diffstat (limited to 'sys/kern/kern_lockf.c')
-rw-r--r--  sys/kern/kern_lockf.c | 20 ++++++++++----------
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c
index 91eae09..a0a3789 100644
--- a/sys/kern/kern_lockf.c
+++ b/sys/kern/kern_lockf.c
@@ -362,7 +362,7 @@ lf_free_lock(struct lockf_entry *lock)
struct lock_owner *lo = lock->lf_owner;
if (lo) {
KASSERT(LIST_EMPTY(&lock->lf_outedges),
- ("freeing lock with dependancies"));
+ ("freeing lock with dependencies"));
KASSERT(LIST_EMPTY(&lock->lf_inedges),
("freeing lock with dependants"));
sx_xlock(&lf_lock_owners_lock);
@@ -827,7 +827,7 @@ lf_purgelocks(struct vnode *vp, struct lockf **statep)
/*
* We can just free all the active locks since they
- * will have no dependancies (we removed them all
+ * will have no dependencies (we removed them all
* above). We don't need to bother locking since we
* are the last thread using this state structure.
*/
@@ -1112,7 +1112,7 @@ lf_insert_lock(struct lockf *state, struct lockf_entry *lock)
/*
* Wake up a sleeping lock and remove it from the pending list now
- * that all its dependancies have been resolved. The caller should
+ * that all its dependencies have been resolved. The caller should
* arrange for the lock to be added to the active list, adjusting any
* existing locks for the same owner as needed.
*/
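As a rough illustration of the behaviour this comment describes, here is a minimal sketch of the wakeup step, assuming the sys/queue(3) lists used throughout the file; sketch_wakeup_lock is a simplified stand-in, not the real lf_wakeup_lock(), which does more than this:

    static void
    sketch_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
    {
            /*
             * The lock no longer waits on anything, so it leaves
             * the pending list; the caller re-inserts it on the
             * active list.  ('state' is kept only to mirror the
             * real function's signature.)
             */
            LIST_REMOVE(wakelock, lf_link);
            wakeup(wakelock);
    }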
@@ -1137,9 +1137,9 @@ lf_wakeup_lock(struct lockf *state, struct lockf_entry *wakelock)
}
/*
- * Re-check all dependant locks and remove edges to locks that we no
+ * Re-check all dependent locks and remove edges to locks that we no
* longer block. If 'all' is non-zero, the lock has been removed and
- * we must remove all the dependancies, otherwise it has simply been
+ * we must remove all the dependencies, otherwise it has simply been
 * reduced but remains active. Any pending locks which have been
 * unblocked are added to 'granted'.
*/
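A sketch of that re-check may make the edge bookkeeping concrete. It assumes the in-edge/out-edge lists described above; sketch_blocks() and sketch_remove_edge() are invented placeholders for the file's real overlap test and edge-removal helper:

    static void
    sketch_update_dependencies(struct lockf *state, struct lockf_entry *lock,
        int all, struct lockf_entry_list *granted)
    {
            struct lockf_edge *e, *ne;
            struct lockf_entry *waiter;

            LIST_FOREACH_SAFE(e, &lock->lf_inedges, le_inlink, ne) {
                    waiter = e->le_from;
                    /*
                     * Drop the edge if the lock went away entirely
                     * ('all'), or if its reduced range no longer
                     * blocks this waiter.
                     */
                    if (all || !sketch_blocks(lock, waiter)) {
                            sketch_remove_edge(e);
                            /* No remaining out-edges: unblocked. */
                            if (LIST_EMPTY(&waiter->lf_outedges))
                                    LIST_INSERT_HEAD(granted, waiter,
                                        lf_link);
                    }
            }
    }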
@@ -1165,7 +1165,7 @@ lf_update_dependancies(struct lockf *state, struct lockf_entry *lock, int all,
}
/*
- * Set the start of an existing active lock, updating dependancies and
+ * Set the start of an existing active lock, updating dependencies and
* adding any newly woken locks to 'granted'.
*/
static void
@@ -1181,7 +1181,7 @@ lf_set_start(struct lockf *state, struct lockf_entry *lock, off_t new_start,
}
/*
- * Set the end of an existing active lock, updating dependancies and
+ * Set the end of an existing active lock, updating dependencies and
* adding any newly woken locks to 'granted'.
*/
static void
@@ -1204,7 +1204,7 @@ lf_set_end(struct lockf *state, struct lockf_entry *lock, off_t new_end,
* pending locks as a result of downgrading/unlocking. We simply
* activate the newly granted locks by looping.
*
- * Since the new lock already has its dependancies set up, we always
+ * Since the new lock already has its dependencies set up, we always
 * add it to the list (unless it's an unlock request). This may
 * fragment the lock list in some pathological cases but it's probably
* not a real problem.
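A minimal sketch of that loop, assuming a 'granted' list of newly unblocked entries; sketch_activate() is an invented placeholder for the activation step, and the drain-until-empty shape is the point rather than the exact bookkeeping of the real code:

    static void
    sketch_grant_loop(struct lockf *state, struct lockf_entry_list *granted)
    {
            struct lockf_entry *ol;

            /*
             * Activating one lock may downgrade or remove others
             * and so unblock further waiters, which land back on
             * 'granted'; draining until empty handles the cascade.
             */
            while (!LIST_EMPTY(granted)) {
                    ol = LIST_FIRST(granted);
                    LIST_REMOVE(ol, lf_link);
                    lf_wakeup_lock(state, ol);
                    sketch_activate(state, ol, granted); /* may extend 'granted' */
            }
    }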
@@ -1332,7 +1332,7 @@ lf_cancel_lock(struct lockf *state, struct lockf_entry *lock)
* may allow some other pending lock to become
* active. Consider this case:
*
- * Owner Action Result Dependancies
+ * Owner Action Result Dependencies
*
* A: lock [0..0] succeeds
* B: lock [2..2] succeeds
@@ -1840,7 +1840,7 @@ lf_split(struct lockf *state, struct lockf_entry *lock1,
/*
* This cannot cause a deadlock since any edges we would add
* to splitlock already exist in lock1. We must be sure to add
- * necessary dependancies to splitlock before we reduce lock1
+ * necessary dependencies to splitlock before we reduce lock1
* otherwise we may accidentally grant a pending lock that
* was blocked by the tail end of lock1.
*/
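A hypothetical two-step sketch of that ordering constraint; sketch_inherit_edges() is an invented placeholder and the real lf_split() differs in detail, but the order of the two steps is what the comment is insisting on:

    static void
    sketch_split_order(struct lockf *state, struct lockf_entry *lock1,
        struct lockf_entry *splitlock, off_t new_end,
        struct lockf_entry_list *granted)
    {
            /*
             * Step 1: splitlock first inherits copies of lock1's
             * edges, so any waiter blocked by the tail range is
             * still blocked by splitlock.
             */
            sketch_inherit_edges(splitlock, lock1);

            /*
             * Step 2: only now shrink lock1.  A waiter overlapping
             * only the tail is still held back by splitlock, so
             * the dependency update cannot grant it prematurely.
             */
            lf_set_end(state, lock1, new_end, granted);
    }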