lck2 75 source3/locking/brlock.c const struct lock_struct *lck2)
lck2 81 source3/locking/brlock.c lck1->start == lck2->start &&
lck2 82 source3/locking/brlock.c lck1->size == lck2->size) {
lck2 86 source3/locking/brlock.c if (lck1->start >= (lck2->start+lck2->size) ||
lck2 87 source3/locking/brlock.c lck2->start >= (lck1->start+lck1->size)) {
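The brlock.c matches at lines 75-87 above are the byte-range overlap test (brl_overlap(), the helper the conflict checks at lines 117, 150 and 213 fall back to). A minimal sketch of that test, modelling only the start/size fields of Samba's lock_struct as unsigned 64-bit values; the lck1->size != 0 guard on the equality fast path is not among the matches and is included here as an assumption:

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the start/size part of struct lock_struct. */
struct range {
    uint64_t start;
    uint64_t size;
};

static bool range_overlap(const struct range *lck1, const struct range *lck2)
{
    /* Assumed fast path: identical non-empty ranges overlap even when
     * start + size would wrap past the end of 64-bit file space. */
    if (lck1->size != 0 &&
        lck1->start == lck2->start &&
        lck1->size == lck2->size) {
        return true;
    }

    /* Disjoint exactly when one range starts at or past the end of the
     * other (the check at brlock.c:86-87). */
    if (lck1->start >= (lck2->start + lck2->size) ||
        lck2->start >= (lck1->start + lck1->size)) {
        return false;
    }
    return true;
}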
lck2 98 source3/locking/brlock.c const struct lock_struct *lck2)
lck2 101 source3/locking/brlock.c if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
lck2 105 source3/locking/brlock.c if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
lck2 111 source3/locking/brlock.c if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
lck2 112 source3/locking/brlock.c brl_same_context(&lck1->context, &lck2->context) &&
lck2 113 source3/locking/brlock.c lck1->fnum == lck2->fnum) {
lck2 117 source3/locking/brlock.c return brl_overlap(lck1, lck2);
lck2 127 source3/locking/brlock.c const struct lock_struct *lck2)
lck2 131 source3/locking/brlock.c SMB_ASSERT(lck2->lock_flav == POSIX_LOCK);
lck2 135 source3/locking/brlock.c if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
lck2 139 source3/locking/brlock.c if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
lck2 144 source3/locking/brlock.c if (brl_same_context(&lck1->context, &lck2->context)) {
lck2 150 source3/locking/brlock.c return brl_overlap(lck1, lck2);
lck2 155 source3/locking/brlock.c const struct lock_struct *lck2)
lck2 157 source3/locking/brlock.c if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
lck2 160 source3/locking/brlock.c if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
lck2 164 source3/locking/brlock.c if (brl_same_context(&lck1->context, &lck2->context) &&
lck2 165 source3/locking/brlock.c lck2->lock_type == READ_LOCK && lck1->fnum == lck2->fnum) {
lck2 169 source3/locking/brlock.c if (lck2->start == 0 && lck2->size == 0 && lck1->size != 0) {
lck2 173 source3/locking/brlock.c if (lck1->start >= (lck2->start + lck2->size) ||
lck2 174 source3/locking/brlock.c lck2->start >= (lck1->start + lck1->size)) {
lck2 188 source3/locking/brlock.c static bool brl_conflict_other(const struct lock_struct *lck1, const struct lock_struct *lck2)
lck2 190 source3/locking/brlock.c if (IS_PENDING_LOCK(lck1->lock_type) || IS_PENDING_LOCK(lck2->lock_type))
lck2 193 source3/locking/brlock.c if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
lck2 199 source3/locking/brlock.c if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK)
lck2 207 source3/locking/brlock.c if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
lck2 208 source3/locking/brlock.c if (brl_same_context(&lck1->context, &lck2->context) &&
lck2 209 source3/locking/brlock.c lck1->fnum == lck2->fnum)
lck2 213 source3/locking/brlock.c return brl_overlap(lck1, lck2);
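The remaining source3 conflict matches (brlock.c:98-213) come from the family of conflict predicates built on that overlap test. The sketch below reconstructs the three whose rule order is clearest in the matches: the Windows-flavour check at 98-117, the POSIX-flavour check at 127-150 (note the SMB_ASSERT that the locks are POSIX_LOCK), and brl_conflict_other() at 188-213. (The variant at 155-174 additionally special-cases a lock with zero start and size at line 169; its return values are not visible in the matches, so it is left out here.) The struct fields, enum values and helper names below are simplified stand-ins, not Samba's real definitions:

#include <stdbool.h>
#include <stdint.h>

enum lock_kind { READ_LOCK, WRITE_LOCK, PENDING_READ_LOCK, PENDING_WRITE_LOCK };
enum lock_flav { WINDOWS_LOCK, POSIX_LOCK };

/* Simplified stand-ins for Samba's lock_context / lock_struct. */
struct lock_ctx { uint64_t smblctx; uint32_t tid; uint64_t pid; };
struct lock_rec {
    struct lock_ctx context;
    uint64_t start, size;
    uint32_t fnum;                 /* open-file identity */
    enum lock_kind lock_type;
    enum lock_flav lock_flav;
};

static bool is_pending(enum lock_kind t)
{
    return t == PENDING_READ_LOCK || t == PENDING_WRITE_LOCK;
}

static bool same_ctx(const struct lock_ctx *a, const struct lock_ctx *b)
{
    return a->smblctx == b->smblctx && a->tid == b->tid && a->pid == b->pid;
}

static bool overlap(const struct lock_rec *a, const struct lock_rec *b)
{
    return !(a->start >= b->start + b->size ||
             b->start >= a->start + a->size);
}

/* Windows flavour (brlock.c:98-117): pending locks never conflict, two reads
 * never conflict, and a read may stack on the same handle's own write lock;
 * everything else conflicts iff the ranges overlap. */
static bool conflict_windows(const struct lock_rec *lck1, const struct lock_rec *lck2)
{
    if (is_pending(lck1->lock_type) || is_pending(lck2->lock_type)) {
        return false;
    }
    if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
        return false;
    }
    if (lck1->lock_type == WRITE_LOCK && lck2->lock_type == READ_LOCK &&
        same_ctx(&lck1->context, &lck2->context) && lck1->fnum == lck2->fnum) {
        return false;
    }
    return overlap(lck1, lck2);
}

/* POSIX flavour (brlock.c:127-150): the same owning context never conflicts
 * with itself and fnum is ignored; otherwise conflict iff overlapping and
 * not both reads. */
static bool conflict_posix(const struct lock_rec *lck1, const struct lock_rec *lck2)
{
    if (is_pending(lck1->lock_type) || is_pending(lck2->lock_type)) {
        return false;
    }
    if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
        return false;
    }
    if (same_ctx(&lck1->context, &lck2->context)) {
        return false;
    }
    return overlap(lck1, lck2);
}

/* brl_conflict_other() shape (brlock.c:188-213): two POSIX locks never
 * conflict here, and the same context/fnum is exempted unless lck2 is a
 * write landing on an existing read lock. */
static bool conflict_other(const struct lock_rec *lck1, const struct lock_rec *lck2)
{
    if (is_pending(lck1->lock_type) || is_pending(lck2->lock_type)) {
        return false;
    }
    if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
        return false;
    }
    if (lck1->lock_flav == POSIX_LOCK && lck2->lock_flav == POSIX_LOCK) {
        return false;
    }
    if (!(lck2->lock_type == WRITE_LOCK && lck1->lock_type == READ_LOCK)) {
        if (same_ctx(&lck1->context, &lck2->context) &&
            lck1->fnum == lck2->fnum) {
            return false;
        }
    }
    return overlap(lck1, lck2);
}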
lck2 296 source3/locking/brlock.c const struct lock_struct *lck2)
lck2 298 source3/locking/brlock.c if (lck1->start != lck2->start) {
lck2 299 source3/locking/brlock.c return (lck1->start - lck2->start);
lck2 301 source3/locking/brlock.c if (lck2->size != lck1->size) {
lck2 302 source3/locking/brlock.c return ((int)lck1->size - (int)lck2->size);
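brlock.c:296-302 is a comparison helper that orders locks by start first, then by size. A small qsort()-compatible sketch of that ordering; unlike the visible return ((int)lck1->size - (int)lck2->size);, it compares explicitly so large unsigned 64-bit values cannot wrap or truncate when narrowed to int:

#include <stdint.h>

struct range { uint64_t start; uint64_t size; };

/* Order by start, then by size, mirroring the lock_compare matches above. */
static int range_compare(const void *p1, const void *p2)
{
    const struct range *a = p1;
    const struct range *b = p2;

    if (a->start != b->start) {
        return (a->start < b->start) ? -1 : 1;
    }
    if (a->size != b->size) {
        return (a->size < b->size) ? -1 : 1;
    }
    return 0;
}

Given an array struct range locks[n], qsort(locks, n, sizeof(locks[0]), range_compare) yields the same start-then-size ordering.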
lck2 145 source4/ntvfs/common/brlock_tdb.c struct lock_struct *lck2)
lck2 150 source4/ntvfs/common/brlock_tdb.c lck1->start == lck2->start &&
lck2 151 source4/ntvfs/common/brlock_tdb.c lck1->size == lck2->size) {
lck2 155 source4/ntvfs/common/brlock_tdb.c if (lck1->start >= (lck2->start+lck2->size) ||
lck2 156 source4/ntvfs/common/brlock_tdb.c lck2->start >= (lck1->start+lck1->size)) {
lck2 175 source4/ntvfs/common/brlock_tdb.c struct lock_struct *lck2)
lck2 179 source4/ntvfs/common/brlock_tdb.c lck2->lock_type >= PENDING_READ_LOCK) {
lck2 183 source4/ntvfs/common/brlock_tdb.c if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
lck2 187 source4/ntvfs/common/brlock_tdb.c if (brl_tdb_same_context(&lck1->context, &lck2->context) &&
lck2 188 source4/ntvfs/common/brlock_tdb.c lck2->lock_type == READ_LOCK && lck1->ntvfs == lck2->ntvfs) {
lck2 192 source4/ntvfs/common/brlock_tdb.c return brl_tdb_overlap(lck1, lck2);
lck2 200 source4/ntvfs/common/brlock_tdb.c static bool brl_tdb_conflict_other(struct lock_struct *lck1, struct lock_struct *lck2)
lck2 204 source4/ntvfs/common/brlock_tdb.c lck2->lock_type >= PENDING_READ_LOCK) {
lck2 208 source4/ntvfs/common/brlock_tdb.c if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK)
lck2 216 source4/ntvfs/common/brlock_tdb.c if (brl_tdb_same_context(&lck1->context, &lck2->context) &&
lck2 217 source4/ntvfs/common/brlock_tdb.c lck1->ntvfs == lck2->ntvfs &&
lck2 218 source4/ntvfs/common/brlock_tdb.c (lck2->lock_type == READ_LOCK || lck1->lock_type == WRITE_LOCK)) {
lck2 222 source4/ntvfs/common/brlock_tdb.c return brl_tdb_overlap(lck1, lck2);
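The source4 copies in brlock_tdb.c follow the same shape as the source3 checks, with two visible differences: pending locks are detected by comparing lock_type against PENDING_READ_LOCK (so the pending values must sort last in the enum), and lock identity is the ntvfs handle pointer rather than an fnum. A compact sketch of the conflict check at lines 175-192 under those assumptions; the context fields are simplified stand-ins:

#include <stdbool.h>
#include <stdint.h>

/* Pending values ordered last so ">= PENDING_READ_LOCK" catches them,
 * mirroring brlock_tdb.c:179 and 204. */
enum lock_kind { READ_LOCK, WRITE_LOCK, PENDING_READ_LOCK, PENDING_WRITE_LOCK };

struct lock_ctx { uint32_t server; uint32_t smbpid; };   /* simplified */
struct lock_rec {
    struct lock_ctx context;
    void *ntvfs;                  /* handle identity instead of fnum */
    uint64_t start, size;
    enum lock_kind lock_type;
};

static bool same_ctx(const struct lock_ctx *a, const struct lock_ctx *b)
{
    return a->server == b->server && a->smbpid == b->smbpid;
}

static bool overlap(const struct lock_rec *a, const struct lock_rec *b)
{
    return !(a->start >= b->start + b->size ||
             b->start >= a->start + a->size);
}

/* brlock_tdb.c:175-192: reads never conflict, and a read may stack on the
 * same context/handle; everything else conflicts iff the ranges overlap. */
static bool tdb_conflict(const struct lock_rec *lck1, const struct lock_rec *lck2)
{
    if (lck1->lock_type >= PENDING_READ_LOCK ||
        lck2->lock_type >= PENDING_READ_LOCK) {
        return false;
    }
    if (lck1->lock_type == READ_LOCK && lck2->lock_type == READ_LOCK) {
        return false;
    }
    if (same_ctx(&lck1->context, &lck2->context) &&
        lck2->lock_type == READ_LOCK && lck1->ntvfs == lck2->ntvfs) {
        return false;
    }
    return overlap(lck1, lck2);
}

brl_tdb_conflict_other() (lines 200-222) widens the same-context exemption: as the matches at 216-218 show, the handle's own lock is exempt when lck2 is a read or lck1 is a write.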
lck2 996 source4/ntvfs/ntvfs_generic.c union smb_lock *lck2;
lck2 999 source4/ntvfs/ntvfs_generic.c lck2 = talloc(req, union smb_lock);
lck2 1000 source4/ntvfs/ntvfs_generic.c if (lck2 == NULL) {
lck2 1004 source4/ntvfs/ntvfs_generic.c locks = talloc_array(lck2, struct smb_lock_entry, 1);
lck2 1014 source4/ntvfs/ntvfs_generic.c lck2->generic.level = RAW_LOCK_GENERIC;
lck2 1015 source4/ntvfs/ntvfs_generic.c lck2->generic.in.file.ntvfs= lck->lock.in.file.ntvfs;
lck2 1016 source4/ntvfs/ntvfs_generic.c lck2->generic.in.mode = 0;
lck2 1017 source4/ntvfs/ntvfs_generic.c lck2->generic.in.timeout = 0;
lck2 1018 source4/ntvfs/ntvfs_generic.c lck2->generic.in.ulock_cnt = 0;
lck2 1019 source4/ntvfs/ntvfs_generic.c lck2->generic.in.lock_cnt = 1;
lck2 1020 source4/ntvfs/ntvfs_generic.c lck2->generic.in.locks = locks;
lck2 1027 source4/ntvfs/ntvfs_generic.c lck2->generic.level = RAW_LOCK_GENERIC;
lck2 1028 source4/ntvfs/ntvfs_generic.c lck2->generic.in.file.ntvfs= lck->unlock.in.file.ntvfs;
lck2 1029 source4/ntvfs/ntvfs_generic.c lck2->generic.in.mode = 0;
lck2 1030 source4/ntvfs/ntvfs_generic.c lck2->generic.in.timeout = 0;
lck2 1031 source4/ntvfs/ntvfs_generic.c lck2->generic.in.ulock_cnt = 1;
lck2 1032 source4/ntvfs/ntvfs_generic.c lck2->generic.in.lock_cnt = 0;
lck2 1033 source4/ntvfs/ntvfs_generic.c lck2->generic.in.locks = locks;
lck2 1048 source4/ntvfs/ntvfs_generic.c lck2->generic.level = RAW_LOCK_GENERIC;
lck2 1049 source4/ntvfs/ntvfs_generic.c lck2->generic.in.file.ntvfs= lck->smb2.in.file.ntvfs;
lck2 1050 source4/ntvfs/ntvfs_generic.c lck2->generic.in.timeout = UINT32_MAX;
lck2 1051 source4/ntvfs/ntvfs_generic.c lck2->generic.in.mode = 0;
lck2 1052 source4/ntvfs/ntvfs_generic.c lck2->generic.in.lock_cnt = 0;
lck2 1053 source4/ntvfs/ntvfs_generic.c lck2->generic.in.ulock_cnt = 0;
lck2 1054 source4/ntvfs/ntvfs_generic.c lck2->generic.in.locks = talloc_zero_array(lck2, struct smb_lock_entry,
lck2 1056 source4/ntvfs/ntvfs_generic.c if (lck2->generic.in.locks == NULL) {
lck2 1062 source4/ntvfs/ntvfs_generic.c lck2->generic.in.ulock_cnt = lck->smb2.in.lock_count;
lck2 1065 source4/ntvfs/ntvfs_generic.c lck2->generic.in.lock_cnt = lck->smb2.in.lock_count;
lck2 1078 source4/ntvfs/ntvfs_generic.c lck2->generic.in.locks[i].pid = req->smbpid;
lck2 1079 source4/ntvfs/ntvfs_generic.c lck2->generic.in.locks[i].offset = lck->smb2.in.locks[i].offset;
lck2 1080 source4/ntvfs/ntvfs_generic.c lck2->generic.in.locks[i].count = lck->smb2.in.locks[i].length;
lck2 1082 source4/ntvfs/ntvfs_generic.c lck2->generic.in.mode = LOCKING_ANDX_SHARED_LOCK;
lck2 1085 source4/ntvfs/ntvfs_generic.c lck2->generic.in.timeout = 0;
lck2 1094 source4/ntvfs/ntvfs_generic.c lck2->generic.level = RAW_LOCK_GENERIC;
lck2 1095 source4/ntvfs/ntvfs_generic.c lck2->generic.in.file.ntvfs = lck->smb2_break.in.file.ntvfs;
lck2 1096 source4/ntvfs/ntvfs_generic.c lck2->generic.in.mode = LOCKING_ANDX_OPLOCK_RELEASE |
lck2 1098 source4/ntvfs/ntvfs_generic.c lck2->generic.in.timeout = 0;
lck2 1099 source4/ntvfs/ntvfs_generic.c lck2->generic.in.ulock_cnt = 0;
lck2 1100 source4/ntvfs/ntvfs_generic.c lck2->generic.in.lock_cnt = 0;
lck2 1101 source4/ntvfs/ntvfs_generic.c lck2->generic.in.locks = NULL;
lck2 1116 source4/ntvfs/ntvfs_generic.c return ntvfs->ops->lock(ntvfs, req, lck2);
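The ntvfs_generic.c matches come from the mapping path that converts each lock request level into one generic request (lck2) and hands it to the next backend via ntvfs->ops->lock() (line 1116). The assignments visible above show the pattern: a plain lock or unlock becomes a single-entry locks array with lock_cnt or ulock_cnt set to 1, an oplock break sets mode to LOCKING_ANDX_OPLOCK_RELEASE with no lock entries, and an SMB2 lock request starts with an infinite timeout, copies lock_count entries (pid from req->smbpid, offset, length mapped to count), routes the count into ulock_cnt or lock_cnt, switches mode to LOCKING_ANDX_SHARED_LOCK for shared requests, and drops the timeout to 0 when the request must fail immediately. A self-contained sketch of just the SMB2-to-generic part of that mapping; the structs and flag constants below are simplified stand-ins for illustration, not Samba's real definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

/* Stand-in SMB2 lock flags (placeholder names and values). */
#define LOCKFLAG_SHARED            0x01
#define LOCKFLAG_EXCLUSIVE         0x02
#define LOCKFLAG_UNLOCK            0x04
#define LOCKFLAG_FAIL_IMMEDIATELY  0x10

#define MODE_SHARED_LOCK           0x01   /* stand-in for LOCKING_ANDX_SHARED_LOCK */

struct smb2_lock_entry    { uint64_t offset; uint64_t length; uint32_t flags; };
struct generic_lock_entry { uint32_t pid; uint64_t offset; uint64_t count; };

struct generic_lock_req {
    uint32_t mode;
    uint32_t timeout;
    int ulock_cnt;
    int lock_cnt;
    struct generic_lock_entry *locks;
};

/* Fill a generic lock request from SMB2-style lock entries, following the
 * assignment pattern in the matches above. Returns false on allocation
 * failure; the caller owns out->locks. */
static bool map_smb2_lock(struct generic_lock_req *out,
                          const struct smb2_lock_entry *in, int count,
                          uint32_t smbpid)
{
    out->mode = 0;
    out->timeout = UINT32_MAX;            /* wait indefinitely by default */
    out->ulock_cnt = 0;
    out->lock_cnt = 0;

    out->locks = calloc((size_t)count, sizeof(*out->locks));
    if (out->locks == NULL && count > 0) {
        return false;
    }

    /* The whole batch is either locks or unlocks. */
    if (count > 0 && (in[0].flags & LOCKFLAG_UNLOCK)) {
        out->ulock_cnt = count;
    } else {
        out->lock_cnt = count;
    }

    for (int i = 0; i < count; i++) {
        out->locks[i].pid    = smbpid;
        out->locks[i].offset = in[i].offset;
        out->locks[i].count  = in[i].length;

        if (in[i].flags & LOCKFLAG_SHARED) {
            out->mode = MODE_SHARED_LOCK;
        }
        if (in[i].flags & LOCKFLAG_FAIL_IMMEDIATELY) {
            out->timeout = 0;             /* fail rather than block */
        }
    }
    return true;
}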