diff options
Diffstat (limited to 'db2/lock')
-rw-r--r--  db2/lock/lock.c           | 1362
-rw-r--r--  db2/lock/lock_conflict.c  |   39
-rw-r--r--  db2/lock/lock_deadlock.c  |  496
-rw-r--r--  db2/lock/lock_util.c      |  103
4 files changed, 2000 insertions, 0 deletions
diff --git a/db2/lock/lock.c b/db2/lock/lock.c new file mode 100644 index 0000000000..8fc91334a7 --- /dev/null +++ b/db2/lock/lock.c @@ -0,0 +1,1362 @@ +/*- + * See the file LICENSE for redistribution information. + * + * Copyright (c) 1996, 1997 + * Sleepycat Software. All rights reserved. + */ + +#include "config.h" + +#ifndef lint +static const char sccsid[] = "@(#)lock.c 10.31 (Sleepycat) 8/17/97"; +#endif /* not lint */ + +#ifndef NO_SYSTEM_INCLUDES +#include <sys/types.h> +#include <sys/mman.h> +#include <sys/stat.h> + +#include <errno.h> +#include <fcntl.h> +#include <stddef.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <unistd.h> +#endif + +#include "db_int.h" +#include "shqueue.h" +#include "db_page.h" +#include "db_shash.h" +#include "lock.h" +#include "common_ext.h" +#include "db_am.h" + +static void __lock_checklocker __P((DB_LOCKTAB *, struct __db_lock *, int)); +static int __lock_count_locks __P((DB_LOCKREGION *)); +static int __lock_count_objs __P((DB_LOCKREGION *)); +static int __lock_create __P((const char *, int, DB_ENV *)); +static void __lock_freeobj __P((DB_LOCKTAB *, DB_LOCKOBJ *)); +static int __lock_get_internal __P((DB_LOCKTAB *, u_int32_t, int, const DBT *, + db_lockmode_t, struct __db_lock **)); +static int __lock_grow_region __P((DB_LOCKTAB *, int, size_t)); +static int __lock_put_internal __P((DB_LOCKTAB *, struct __db_lock *, int)); +static void __lock_remove_waiter + __P((DB_LOCKTAB *, DB_LOCKOBJ *, struct __db_lock *, db_status_t)); +static void __lock_reset_region __P((DB_LOCKTAB *)); +static int __lock_validate_region __P((DB_LOCKTAB *)); +#ifdef DEBUG +static void __lock_dump_locker __P((DB_LOCKTAB *, DB_LOCKOBJ *)); +static void __lock_dump_object __P((DB_LOCKTAB *, DB_LOCKOBJ *)); +static void __lock_printlock __P((DB_LOCKTAB *, struct __db_lock *, int)); +#endif + +/* + * Create and initialize a lock region in shared memory. + */ + +/* + * __lock_create -- + * Create the lock region. 
Returns an errno. In most cases, + * the errno should be that returned by __db_ropen, in which case + * an EAGAIN means that we should retry, and an EEXIST means that + * the region exists and we didn't need to create it. Any other + * sort of errno should be treated as a system error, leading to a + * failure of the original interface call. + */ +static int +__lock_create(path, mode, dbenv) + const char *path; + int mode; + DB_ENV *dbenv; +{ + struct __db_lock *lp; + struct lock_header *tq_head; + struct obj_header *obj_head; + DB_LOCKOBJ *op; + DB_LOCKREGION *lrp; + u_int maxlocks; + u_int32_t i; + int fd, lock_modes, nelements, ret; + u_int8_t *conflicts, *curaddr; + + maxlocks = dbenv == NULL || dbenv->lk_max == 0 ? + DB_LOCK_DEFAULT_N : dbenv->lk_max; + lock_modes = dbenv == NULL || dbenv->lk_modes == 0 ? + DB_LOCK_RW_N : dbenv->lk_modes; + conflicts = dbenv == NULL || dbenv->lk_conflicts == NULL ? + (u_int8_t *)db_rw_conflicts : dbenv->lk_conflicts; + + if ((ret = + __db_rcreate(dbenv, DB_APP_NONE, path, DB_DEFAULT_LOCK_FILE, mode, + LOCK_REGION_SIZE(lock_modes, maxlocks, __db_tablesize(maxlocks)), + &fd, &lrp)) != 0) + return (ret); + + /* Region exists; now initialize it. */ + lrp->table_size = __db_tablesize(maxlocks); + lrp->magic = DB_LOCKMAGIC; + lrp->version = DB_LOCKVERSION; + lrp->id = 0; + lrp->maxlocks = maxlocks; + lrp->need_dd = 0; + lrp->detect = DB_LOCK_NORUN; + lrp->numobjs = maxlocks; + lrp->nlockers = 0; + lrp->mem_bytes = ALIGN(STRING_SIZE(maxlocks), sizeof(size_t)); + lrp->increment = lrp->hdr.size / 2; + lrp->nmodes = lock_modes; + lrp->nconflicts = 0; + lrp->nrequests = 0; + lrp->nreleases = 0; + lrp->ndeadlocks = 0; + + /* + * As we write the region, we've got to maintain the alignment + * for the structures that follow each chunk. This information + * ends up being encapsulated both in here as well as in the + * lock.h file for the XXX_SIZE macros. + */ + /* Initialize conflict matrix. 
*/ + curaddr = (u_int8_t *)lrp + sizeof(DB_LOCKREGION); + memcpy(curaddr, conflicts, lock_modes * lock_modes); + curaddr += lock_modes * lock_modes; + + /* + * Initialize hash table. + */ + curaddr = (u_int8_t *)ALIGNP(curaddr, LOCK_HASH_ALIGN); + lrp->hash_off = curaddr - (u_int8_t *)lrp; + nelements = lrp->table_size; + __db_hashinit(curaddr, nelements); + curaddr += nelements * sizeof(DB_HASHTAB); + + /* + * Initialize locks onto a free list. Since locks contains mutexes, + * we need to make sure that each lock is aligned on a MUTEX_ALIGNMENT + * boundary. + */ + curaddr = (u_int8_t *)ALIGNP(curaddr, MUTEX_ALIGNMENT); + tq_head = &lrp->free_locks; + SH_TAILQ_INIT(tq_head); + + for (i = 0; i++ < maxlocks; + curaddr += ALIGN(sizeof(struct __db_lock), MUTEX_ALIGNMENT)) { + lp = (struct __db_lock *)curaddr; + lp->status = DB_LSTAT_FREE; + SH_TAILQ_INSERT_HEAD(tq_head, lp, links, __db_lock); + } + + /* Initialize objects onto a free list. */ + obj_head = &lrp->free_objs; + SH_TAILQ_INIT(obj_head); + + for (i = 0; i++ < maxlocks; curaddr += sizeof(DB_LOCKOBJ)) { + op = (DB_LOCKOBJ *)curaddr; + SH_TAILQ_INSERT_HEAD(obj_head, op, links, __db_lockobj); + } + + /* + * Initialize the string space; as for all shared memory allocation + * regions, this requires size_t alignment, since we store the + * lengths of malloc'd areas in the area.. + */ + curaddr = (u_int8_t *)ALIGNP(curaddr, sizeof(size_t)); + lrp->mem_off = curaddr - (u_int8_t *)lrp; + __db_shalloc_init(curaddr, lrp->mem_bytes); + + /* Release the lock. */ + (void)__db_mutex_unlock(&lrp->hdr.lock, fd); + + /* Now unmap the region. */ + if ((ret = __db_rclose(dbenv, fd, lrp)) != 0) { + (void)lock_unlink(path, 1 /* force */, dbenv); + return (ret); + } + + return (0); +} + +int +lock_open(path, flags, mode, dbenv, ltp) + const char *path; + int flags, mode; + DB_ENV *dbenv; + DB_LOCKTAB **ltp; +{ + DB_LOCKTAB *lt; + int ret, retry_cnt; + + /* Validate arguments. 
*/ +#ifdef HAVE_SPINLOCKS +#define OKFLAGS (DB_CREATE | DB_THREAD) +#else +#define OKFLAGS (DB_CREATE) +#endif + if ((ret = __db_fchk(dbenv, "lock_open", flags, OKFLAGS)) != 0) + return (ret); + + /* + * Create the lock table structure. + */ + if ((lt = (DB_LOCKTAB *)calloc(1, sizeof(DB_LOCKTAB))) == NULL) { + __db_err(dbenv, "%s", strerror(errno)); + return (ENOMEM); + } + lt->dbenv = dbenv; + + /* + * Now, create the lock region if it doesn't already exist. + */ + retry_cnt = 0; +retry: if (LF_ISSET(DB_CREATE) && + (ret = __lock_create(path, mode, dbenv)) != 0) + if (ret == EAGAIN && ++retry_cnt < 3) { + (void)__db_sleep(1, 0); + goto retry; + } else if (ret == EEXIST) /* We did not create the region */ + LF_CLR(DB_CREATE); + else + goto out; + + /* + * Finally, open the region, map it in, and increment the + * reference count. + */ + retry_cnt = 0; +retry1: if ((ret = __db_ropen(dbenv, DB_APP_NONE, path, DB_DEFAULT_LOCK_FILE, + LF_ISSET(~(DB_CREATE | DB_THREAD)), <->fd, <->region)) != 0) { + if (ret == EAGAIN && ++retry_cnt < 3) { + (void)__db_sleep(1, 0); + goto retry1; + } + goto out; + } + + if (lt->region->magic != DB_LOCKMAGIC) { + __db_err(dbenv, "lock_open: Bad magic number"); + ret = EINVAL; + goto out; + } + + /* Check for automatic deadlock detection. */ + if (dbenv->lk_detect != DB_LOCK_NORUN) { + if (lt->region->detect != DB_LOCK_NORUN && + dbenv->lk_detect != DB_LOCK_DEFAULT && + lt->region->detect != dbenv->lk_detect) { + __db_err(dbenv, + "lock_open: incompatible deadlock detector mode"); + ret = EINVAL; + goto out; + } + if (lt->region->detect == DB_LOCK_NORUN) + lt->region->detect = dbenv->lk_detect; + } + + /* Set up remaining pointers into region. 
*/ + lt->conflicts = (u_int8_t *)lt->region + sizeof(DB_LOCKREGION); + lt->hashtab = + (DB_HASHTAB *)((u_int8_t *)lt->region + lt->region->hash_off); + lt->mem = (void *)((u_int8_t *)lt->region + lt->region->mem_off); + lt->reg_size = lt->region->hdr.size; + + *ltp = lt; + return (0); + +/* Error handling. */ +out: if (lt->region != NULL) + (void)__db_rclose(lt->dbenv, lt->fd, lt->region); + if (LF_ISSET(DB_CREATE)) + (void)lock_unlink(path, 1, lt->dbenv); + free(lt); + return (ret); +} + +int +lock_id (lt, idp) + DB_LOCKTAB *lt; + u_int32_t *idp; +{ + u_int32_t id; + + LOCK_LOCKREGION(lt); + if (lt->region->id >= DB_LOCK_MAXID) + lt->region->id = 0; + id = ++lt->region->id; + UNLOCK_LOCKREGION(lt); + + *idp = id; + return (0); +} + +int +lock_vec(lt, locker, flags, list, nlist, elistp) + DB_LOCKTAB *lt; + u_int32_t locker; + int flags, nlist; + DB_LOCKREQ *list, **elistp; +{ + struct __db_lock *lp; + DB_LOCKOBJ *sh_obj, *sh_locker; + int i, ret, run_dd; + + /* Validate arguments. */ + if ((ret = + __db_fchk(lt->dbenv, "lock_vec", flags, DB_LOCK_NOWAIT)) != 0) + return (ret); + + LOCK_LOCKREGION(lt); + + if ((ret = __lock_validate_region(lt)) != 0) { + UNLOCK_LOCKREGION(lt); + return (ret); + } + + ret = 0; + for (i = 0; i < nlist && ret == 0; i++) { + switch (list[i].op) { + case DB_LOCK_GET: + ret = __lock_get_internal(lt, locker, flags, + list[i].obj, list[i].mode, &lp); + if (ret == 0) + list[i].lock = LOCK_TO_OFFSET(lt, lp); + break; + case DB_LOCK_PUT: + lp = OFFSET_TO_LOCK(lt, list[i].lock); + if (lp->holder != locker) { + ret = DB_LOCK_NOTHELD; + break; + } + list[i].mode = lp->mode; + + /* XXX Need to copy the object. ??? */ + ret = __lock_put_internal(lt, lp, 0); + break; + case DB_LOCK_PUT_ALL: + /* Find the locker. 
*/ + if ((ret = __lock_getobj(lt, locker, + NULL, DB_LOCK_LOCKER, &sh_locker)) != 0) + break; + + for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock); + lp != NULL; + lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock)) { + if ((ret = __lock_put_internal(lt, lp, 0)) != 0) + break; + } + __lock_freeobj(lt, sh_locker); + lt->region->nlockers--; + break; + case DB_LOCK_PUT_OBJ: + + /* Look up the object in the hash table. */ + __db_hashlookup(lt->hashtab, __db_lockobj, links, + list[i].obj, sh_obj, lt->region->table_size, + __lock_ohash, __lock_cmp); + if (sh_obj == NULL) { + ret = EINVAL; + break; + } + /* + * Release waiters first, because they won't cause + * anyone else to be awakened. If we release the + * lockers first, all the waiters get awakened + * needlessly. + */ + for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock); + lp != NULL; + lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock)) { + lt->region->nreleases += lp->refcount; + __lock_remove_waiter(lt, sh_obj, lp, + DB_LSTAT_NOGRANT); + __lock_checklocker(lt, lp, 1); + } + + for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock); + lp != NULL; + lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock)) { + + lt->region->nreleases += lp->refcount; + SH_LIST_REMOVE(lp, locker_links, __db_lock); + SH_TAILQ_REMOVE(&sh_obj->holders, lp, links, + __db_lock); + lp->status = DB_LSTAT_FREE; + SH_TAILQ_INSERT_HEAD(<->region->free_locks, + lp, links, __db_lock); + } + + /* Now free the object. */ + __lock_freeobj(lt, sh_obj); + break; +#ifdef DEBUG + case DB_LOCK_DUMP: + /* Find the locker. 
*/ + if ((ret = __lock_getobj(lt, locker, + NULL, DB_LOCK_LOCKER, &sh_locker)) != 0) + break; + + for (lp = SH_LIST_FIRST(&sh_locker->heldby, __db_lock); + lp != NULL; + lp = SH_LIST_NEXT(lp, locker_links, __db_lock)) { + __lock_printlock(lt, lp, 1); + ret = EINVAL; + } + if (ret == 0) { + __lock_freeobj(lt, sh_locker); + lt->region->nlockers--; + } + break; +#endif + default: + ret = EINVAL; + break; + } + } + + if (lt->region->need_dd && lt->region->detect != DB_LOCK_NORUN) { + run_dd = 1; + lt->region->need_dd = 0; + } else + run_dd = 0; + + UNLOCK_LOCKREGION(lt); + + if (ret == 0 && run_dd) + lock_detect(lt, 0, lt->region->detect); + + if (elistp && ret != 0) + *elistp = &list[i - 1]; + return (ret); +} + +int +lock_get(lt, locker, flags, obj, lock_mode, lock) + DB_LOCKTAB *lt; + u_int32_t locker; + int flags; + const DBT *obj; + db_lockmode_t lock_mode; + DB_LOCK *lock; +{ + struct __db_lock *lockp; + int ret; + + /* Validate arguments. */ + if ((ret = + __db_fchk(lt->dbenv, "lock_get", flags, DB_LOCK_NOWAIT)) != 0) + return (ret); + + LOCK_LOCKREGION(lt); + + ret = __lock_validate_region(lt); + if (ret == 0 && (ret = __lock_get_internal(lt, + locker, flags, obj, lock_mode, &lockp)) == 0) { + *lock = LOCK_TO_OFFSET(lt, lockp); + lt->region->nrequests++; + } + + UNLOCK_LOCKREGION(lt); + return (ret); +} + +int +lock_put(lt, lock) + DB_LOCKTAB *lt; + DB_LOCK lock; +{ + struct __db_lock *lockp; + int ret, run_dd; + + LOCK_LOCKREGION(lt); + + if ((ret = __lock_validate_region(lt)) != 0) + return (ret); + else { + lockp = OFFSET_TO_LOCK(lt, lock); + ret = __lock_put_internal(lt, lockp, 0); + } + + __lock_checklocker(lt, lockp, 0); + + if (lt->region->need_dd && lt->region->detect != DB_LOCK_NORUN) { + run_dd = 1; + lt->region->need_dd = 0; + } else + run_dd = 0; + + UNLOCK_LOCKREGION(lt); + + if (ret == 0 && run_dd) + lock_detect(lt, 0, lt->region->detect); + + return (ret); +} + +int +lock_close(lt) + DB_LOCKTAB *lt; +{ + int ret; + + if ((ret = 
__db_rclose(lt->dbenv, lt->fd, lt->region)) != 0) + return (ret); + + /* Free lock table. */ + free(lt); + return (0); +} + +int +lock_unlink(path, force, dbenv) + const char *path; + int force; + DB_ENV *dbenv; +{ + return (__db_runlink(dbenv, + DB_APP_NONE, path, DB_DEFAULT_LOCK_FILE, force)); +} + +/* + * XXX This looks like it could be void, but I'm leaving it returning + * an int because I think it will have to when we go through and add + * the appropriate error checking for the EINTR on mutexes. + */ +static int +__lock_put_internal(lt, lockp, do_all) + DB_LOCKTAB *lt; + struct __db_lock *lockp; + int do_all; +{ + struct __db_lock *lp_w, *lp_h, *next_waiter; + DB_LOCKOBJ *sh_obj; + int state_changed; + + if (lockp->refcount == 0 || (lockp->status != DB_LSTAT_HELD && + lockp->status != DB_LSTAT_WAITING) || lockp->obj == 0) { + __db_err(lt->dbenv, "lock_put: invalid lock %lu", + (u_long)((u_int8_t *)lockp - (u_int8_t *)lt->region)); + return (EINVAL); + } + + if (do_all) + lt->region->nreleases += lockp->refcount; + else + lt->region->nreleases++; + if (do_all == 0 && lockp->refcount > 1) { + lockp->refcount--; + return (0); + } + + /* Get the object associated with this lock. */ + sh_obj = (DB_LOCKOBJ *)((u_int8_t *)lockp + lockp->obj); + + /* Remove lock from locker list. */ + SH_LIST_REMOVE(lockp, locker_links, __db_lock); + + /* Remove this lock from its holders/waitlist. */ + if (lockp->status != DB_LSTAT_HELD) + __lock_remove_waiter(lt, sh_obj, lockp, DB_LSTAT_FREE); + else + SH_TAILQ_REMOVE(&sh_obj->holders, lockp, links, __db_lock); + + /* + * We need to do lock promotion. We also need to determine if + * we're going to need to run the deadlock detector again. If + * we release locks, and there are waiters, but no one gets promoted, + * then we haven't fundamentally changed the lockmgr state, so + * we may still have a deadlock and we have to run again. 
However, + * if there were no waiters, or we actually promoted someone, then + * we are OK and we don't have to run it immediately. + */ + for (lp_w = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock), + state_changed = lp_w == NULL; + lp_w != NULL; + lp_w = next_waiter) { + next_waiter = SH_TAILQ_NEXT(lp_w, links, __db_lock); + for (lp_h = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock); + lp_h != NULL; + lp_h = SH_TAILQ_NEXT(lp_h, links, __db_lock)) { + if (CONFLICTS(lt, lp_h->mode, lp_w->mode) && + lp_h->holder != lp_w->holder) + break; + } + if (lp_h != NULL) /* Found a conflict. */ + break; + + /* No conflict, promote the waiting lock. */ + SH_TAILQ_REMOVE(&sh_obj->waiters, lp_w, links, __db_lock); + lp_w->status = DB_LSTAT_PENDING; + SH_TAILQ_INSERT_TAIL(&sh_obj->holders, lp_w, links); + + /* Wake up waiter. */ + (void)__db_mutex_unlock(&lp_w->mutex, lt->fd); + state_changed = 1; + } + + /* Check if object should be reclaimed. */ + if (SH_TAILQ_FIRST(&sh_obj->holders, __db_lock) == NULL) { + __db_hashremove_el(lt->hashtab, __db_lockobj, links, sh_obj, + lt->region->table_size, __lock_lhash); + __db_shalloc_free(lt->mem, SH_DBT_PTR(&sh_obj->lockobj)); + SH_TAILQ_INSERT_HEAD(<->region->free_objs, sh_obj, links, + __db_lockobj); + state_changed = 1; + } + + /* Free lock. */ + lockp->status = DB_LSTAT_FREE; + SH_TAILQ_INSERT_HEAD(<->region->free_locks, lockp, links, __db_lock); + + /* + * If we did not promote anyone; we need to run the deadlock + * detector again. + */ + if (state_changed == 0) + lt->region->need_dd = 1; + + return (0); +} + +static int +__lock_get_internal(lt, locker, flags, obj, lock_mode, lockp) + DB_LOCKTAB *lt; + u_int32_t locker; + int flags; + const DBT *obj; + db_lockmode_t lock_mode; + struct __db_lock **lockp; +{ + struct __db_lock *newl, *lp; + DB_LOCKOBJ *sh_obj, *sh_locker; + DB_LOCKREGION *lrp; + size_t newl_off; + int ret; + + ret = 0; + /* + * Check that lock mode is valid. 
+ */ + + lrp = lt->region; + if ((u_int32_t)lock_mode >= lrp->nmodes) { + __db_err(lt->dbenv, + "lock_get: invalid lock mode %lu\n", (u_long)lock_mode); + return (EINVAL); + } + + /* Allocate a new lock. Optimize for the common case of a grant. */ + if ((newl = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock)) == NULL) { + if ((ret = __lock_grow_region(lt, DB_LOCK_LOCK, 0)) != 0) + return (ret); + lrp = lt->region; + newl = SH_TAILQ_FIRST(&lrp->free_locks, __db_lock); + } + newl_off = LOCK_TO_OFFSET(lt, newl); + + /* Optimize for common case of granting a lock. */ + SH_TAILQ_REMOVE(&lrp->free_locks, newl, links, __db_lock); + + newl->mode = lock_mode; + newl->status = DB_LSTAT_HELD; + newl->holder = locker; + newl->refcount = 1; + + if ((ret = + __lock_getobj(lt, 0, (DBT *)obj, DB_LOCK_OBJTYPE, &sh_obj)) != 0) + return (ret); + + lrp = lt->region; /* getobj might have grown */ + newl = OFFSET_TO_LOCK(lt, newl_off); + + /* Now make new lock point to object */ + newl->obj = SH_PTR_TO_OFF(newl, sh_obj); + + /* + * Now we have a lock and an object and we need to see if we should + * grant the lock. We use a FIFO ordering so we can only grant a + * new lock if it does not conflict with anyone on the holders list + * OR anyone on the waiters list. In case of conflict, we put the + * new lock on the end of the waiters list. + */ + for (lp = SH_TAILQ_FIRST(&sh_obj->holders, __db_lock); + lp != NULL; + lp = SH_TAILQ_NEXT(lp, links, __db_lock)) { + if (CONFLICTS(lt, lp->mode, lock_mode) && + locker != lp->holder) + break; + else if (lp->holder == locker && lp->mode == lock_mode && + lp->status == DB_LSTAT_HELD) { + /* Lock is already held, just inc the ref count. 
*/ + lp->refcount++; + SH_TAILQ_INSERT_HEAD(&lrp->free_locks, newl, links, + __db_lock); + *lockp = lp; + return (0); + } + } + + if (lp == NULL) + for (lp = SH_TAILQ_FIRST(&sh_obj->waiters, __db_lock); + lp != NULL; + lp = SH_TAILQ_NEXT(lp, links, __db_lock)) { + if (CONFLICTS(lt, lp->mode, lock_mode) && + locker != lp->holder) + break; + } + if (lp == NULL) + SH_TAILQ_INSERT_TAIL(&sh_obj->holders, newl, links); + else if (!(flags & DB_LOCK_NOWAIT)) + SH_TAILQ_INSERT_TAIL(&sh_obj->waiters, newl, links); + else { + /* Free the lock and return an error. */ + newl->status = DB_LSTAT_FREE; + SH_TAILQ_INSERT_HEAD(&lrp->free_locks, newl, links, __db_lock); + return (DB_LOCK_NOTGRANTED); + } + + /* + * This is really a blocker for the process, so initialize it + * set. That way the current process will block when it tries + * to get it and the waking process will release it. + */ + (void)__db_mutex_init(&newl->mutex, + MUTEX_LOCK_OFFSET(lt->region, &newl->mutex)); + (void)__db_mutex_lock(&newl->mutex, lt->fd, + lt->dbenv == NULL ? NULL : lt->dbenv->db_yield); + + /* + * Now, insert the lock onto its locker's list. + */ + if ((ret = + __lock_getobj(lt, locker, NULL, DB_LOCK_LOCKER, &sh_locker)) != 0) + return (ret); + + lrp = lt->region; + SH_LIST_INSERT_HEAD(&sh_locker->heldby, newl, locker_links, __db_lock); + + if (lp != NULL) { + newl->status = DB_LSTAT_WAITING; + lrp->nconflicts++; + /* + * We are about to wait; must release the region mutex. + * Then, when we wakeup, we need to reacquire the region + * mutex before continuing. + */ + if (lrp->detect == DB_LOCK_NORUN) + lt->region->need_dd = 1; + UNLOCK_LOCKREGION(lt); + + /* + * We are about to wait; before waiting, see if the deadlock + * detector should be run. + */ + if (lrp->detect != DB_LOCK_NORUN) + ret = lock_detect(lt, 0, lrp->detect); + + (void)__db_mutex_lock(&newl->mutex, + lt->fd, lt->dbenv == NULL ? 
NULL : lt->dbenv->db_yield); + + LOCK_LOCKREGION(lt); + if (newl->status != DB_LSTAT_PENDING) { + /* Return to free list. */ + __lock_checklocker(lt, newl, 0); + SH_TAILQ_INSERT_HEAD(&lrp->free_locks, newl, links, + __db_lock); + switch (newl->status) { + case DB_LSTAT_ABORTED: + ret = DB_LOCK_DEADLOCK; + break; + case DB_LSTAT_NOGRANT: + ret = DB_LOCK_NOTGRANTED; + break; + default: + ret = EINVAL; + break; + } + newl->status = DB_LSTAT_FREE; + newl = NULL; + } else |
