/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 */

/*
 * This module provides range lock functionality for CIFS/SMB clients.
 * Lock range service functions process SMB lock and unlock requests
 * for a file by applying the lock rules and mark the file range as
 * locked if the lock is granted; otherwise they return the appropriate
 * error code.
 */

#include <smbsrv/smb_kproto.h>
#include <smbsrv/smb_fsops.h>
#include <sys/nbmlock.h>
#include <sys/param.h>

extern caller_context_t smb_ct;

static void smb_lock_posix_unlock(smb_node_t *, smb_lock_t *, cred_t *);
static boolean_t smb_is_range_unlocked(uint64_t, uint64_t, uint32_t,
    smb_llist_t *, uint64_t *);
static int smb_lock_range_overlap(smb_lock_t *, uint64_t, uint64_t);
static uint32_t smb_lock_range_lckrules(smb_request_t *, smb_ofile_t *,
    smb_node_t *, smb_lock_t *, smb_lock_t **);
static clock_t smb_lock_wait(smb_request_t *, smb_lock_t *, smb_lock_t *);
static uint32_t smb_lock_range_ulckrules(smb_request_t *, smb_node_t *,
    uint64_t, uint64_t, smb_lock_t **nodelock);
static smb_lock_t *smb_lock_create(smb_request_t *, uint64_t, uint64_t,
    uint32_t, uint32_t);
static void smb_lock_destroy(smb_lock_t *);
static void smb_lock_free(smb_lock_t *);

/*
 * Return the number of range locks on the specified ofile.
 */
uint32_t
smb_lock_get_lock_count(smb_node_t *node, smb_ofile_t *of)
{
	smb_lock_t	*lock;
	smb_llist_t	*llist;
	uint32_t	count = 0;

	SMB_NODE_VALID(node);
	SMB_OFILE_VALID(of);

	llist = &node->n_lock_list;

	smb_llist_enter(llist, RW_READER);
	for (lock = smb_llist_head(llist);
	    lock != NULL;
	    lock = smb_llist_next(llist, lock)) {
		if (lock->l_file == of)
			++count;
	}
	smb_llist_exit(llist);

	return (count);
}

/*
 * smb_unlock_range
 *
 * Locates the lock range corresponding to the unlock request and removes it.
 *
 * NT_STATUS_SUCCESS - Unlock range performed successfully.
 * !NT_STATUS_SUCCESS - Error in unlock range operation.
 */
uint32_t
smb_unlock_range(
    smb_request_t	*sr,
    smb_node_t		*node,
    uint64_t		start,
    uint64_t		length)
{
	smb_lock_t	*lock = NULL;
	uint32_t	status;

	/* Apply unlocking rules */
	smb_llist_enter(&node->n_lock_list, RW_WRITER);
	status = smb_lock_range_ulckrules(sr, node, start, length, &lock);
	if (status != NT_STATUS_SUCCESS) {
		/*
		 * If the requested range does not match any lock
		 * in the list, return an error.
		 */
		ASSERT(lock == NULL);
		smb_llist_exit(&node->n_lock_list);
		return (status);
	}

	smb_llist_remove(&node->n_lock_list, lock);
	smb_lock_posix_unlock(node, lock, sr->user_cr);
	smb_llist_exit(&node->n_lock_list);
	smb_lock_destroy(lock);

	return (status);
}

/*
 * smb_lock_range
 *
 * Checks the validity of the requested lock over the given range of file
 * data by applying the lock rules against every element of the node lock
 * list.
 *
 * The function returns with the new lock added if the lock request does not
 * conflict with the existing range locks on the file.  Otherwise, if a
 * non-zero timeout was supplied, the request blocks until the conflicting
 * lock is released, the timeout expires, or the request is cancelled.
 *
 * NT_STATUS_SUCCESS - Lock range performed successfully.
 * !NT_STATUS_SUCCESS - Error in lock range operation.
 */
uint32_t
smb_lock_range(
    smb_request_t	*sr,
    uint64_t		start,
    uint64_t		length,
    uint32_t		timeout,
    uint32_t		locktype)
{
	smb_ofile_t	*file = sr->fid_ofile;
	smb_node_t	*node = file->f_node;
	smb_lock_t	*lock;
	smb_lock_t	*clock = NULL;
	uint32_t	result = NT_STATUS_SUCCESS;
	boolean_t	lock_has_timeout = (timeout != 0);

	lock = smb_lock_create(sr, start, length, locktype, timeout);

	smb_llist_enter(&node->n_lock_list, RW_WRITER);
	for (;;) {
		clock_t	rc;

		/* Apply locking rules */
		result = smb_lock_range_lckrules(sr, file, node, lock, &clock);

		if ((result == NT_STATUS_CANCELLED) ||
		    (result == NT_STATUS_SUCCESS) ||
		    (result == NT_STATUS_RANGE_NOT_LOCKED)) {
			ASSERT(clock == NULL);
			break;
		} else if (timeout == 0) {
			break;
		}

		ASSERT(result == NT_STATUS_LOCK_NOT_GRANTED);
		ASSERT(clock);
		/*
		 * Call smb_lock_wait holding write lock for
		 * node lock list.  smb_lock_wait will release
		 * this lock if it blocks.
		 */
		ASSERT(node == clock->l_file->f_node);

		rc = smb_lock_wait(sr, lock, clock);
		if (rc == 0) {
			result = NT_STATUS_CANCELLED;
			break;
		}
		if (rc == -1)
			timeout = 0;

		clock = NULL;
	}

	lock->l_blocked_by = NULL;

	if (result != NT_STATUS_SUCCESS) {
		/*
		 * Under certain conditions NT_STATUS_FILE_LOCK_CONFLICT
		 * should be returned instead of NT_STATUS_LOCK_NOT_GRANTED.
		 */
		if (result == NT_STATUS_LOCK_NOT_GRANTED) {
			/*
			 * Locks with timeouts always return
			 * NT_STATUS_FILE_LOCK_CONFLICT
			 */
			if (lock_has_timeout)
				result = NT_STATUS_FILE_LOCK_CONFLICT;

			/*
			 * Locks starting at or above 0xef000000 that do not
			 * have the MSB set always return
			 * NT_STATUS_FILE_LOCK_CONFLICT
			 */
			if ((lock->l_start >= 0xef000000) &&
			    !(lock->l_start & (1ULL << 63))) {
				result = NT_STATUS_FILE_LOCK_CONFLICT;
			}

			/*
			 * If the last lock attempt to fail on this file handle
			 * started at the same offset as this one then return
			 * NT_STATUS_FILE_LOCK_CONFLICT
			 */
			mutex_enter(&file->f_mutex);
			if ((file->f_flags & SMB_OFLAGS_LLF_POS_VALID) &&
			    (lock->l_start == file->f_llf_pos)) {
				result = NT_STATUS_FILE_LOCK_CONFLICT;
			}
			mutex_exit(&file->f_mutex);
		}

		/* Update last lock failed offset */
		mutex_enter(&file->f_mutex);
		file->f_llf_pos = lock->l_start;
		file->f_flags |= SMB_OFLAGS_LLF_POS_VALID;
		mutex_exit(&file->f_mutex);

		smb_lock_free(lock);
	} else {
		/*
		 * Don't insert into the CIFS lock list unless the
		 * POSIX lock worked.
		 */
		if (smb_fsop_frlock(node, lock, B_FALSE, sr->user_cr))
			result = NT_STATUS_FILE_LOCK_CONFLICT;
		else
			smb_llist_insert_tail(&node->n_lock_list, lock);
	}
	smb_llist_exit(&node->n_lock_list);

	return (result);
}


/*
 * smb_lock_range_access
 *
 * Scans the node lock list to check for any overlapping lock.  An
 * overlapping lock is allowed only under the same session and client pid.
 *
 * Return values
 *	NT_STATUS_SUCCESS		lock access granted.
 *	NT_STATUS_FILE_LOCK_CONFLICT	access denied due to lock conflict.
 */
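/*
 * For example (per the checks below): a read of a range covered only by
 * read locks is always granted, regardless of which session or pid owns
 * those locks; a write into a range covered by a lock owned by another
 * session/pid, or by any read-only lock (even the caller's own), is denied
 * with NT_STATUS_FILE_LOCK_CONFLICT.
 */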
int
smb_lock_range_access(
    smb_request_t	*sr,
    smb_node_t		*node,
    uint64_t		start,
    uint64_t		length,
    boolean_t		will_write)
{
	smb_lock_t	*lock;
	smb_llist_t	*llist;
	int		status = NT_STATUS_SUCCESS;

	llist = &node->n_lock_list;
	smb_llist_enter(llist, RW_READER);
	/* Search for any applicable lock */
	for (lock = smb_llist_head(llist);
	    lock != NULL;
	    lock = smb_llist_next(llist, lock)) {

		if (!smb_lock_range_overlap(lock, start, length))
			/* Lock does not overlap */
			continue;

		if (lock->l_type == SMB_LOCK_TYPE_READONLY && !will_write)
			continue;

		if (lock->l_type == SMB_LOCK_TYPE_READWRITE &&
		    lock->l_session_kid == sr->session->s_kid &&
		    lock->l_pid == sr->smb_pid)
			continue;

		status = NT_STATUS_FILE_LOCK_CONFLICT;
		break;
	}
	smb_llist_exit(llist);
	return (status);
}

void
smb_node_destroy_lock_by_ofile(smb_node_t *node, smb_ofile_t *file)
{
	smb_lock_t	*lock;
	smb_lock_t	*nxtl;
	list_t		destroy_list;

	SMB_NODE_VALID(node);
	ASSERT(node->n_refcnt);

	/*
	 * Move locks matching the specified file from the node->n_lock_list
	 * to a temporary list (holding the lock the entire time) then
	 * destroy all the matching locks.  We can't call smb_lock_destroy
	 * while we are holding the lock for node->n_lock_list because we will
	 * deadlock and we can't drop the lock because the list contents might
	 * change (for example nxtl might get removed on another thread).
	 */
	list_create(&destroy_list, sizeof (smb_lock_t),
	    offsetof(smb_lock_t, l_lnd));

	smb_llist_enter(&node->n_lock_list, RW_WRITER);
	lock = smb_llist_head(&node->n_lock_list);
	while (lock) {
		nxtl = smb_llist_next(&node->n_lock_list, lock);
		if (lock->l_file == file) {
			smb_llist_remove(&node->n_lock_list, lock);
			smb_lock_posix_unlock(node, lock, file->f_user->u_cred);
			list_insert_tail(&destroy_list, lock);
		}
		lock = nxtl;
	}
	smb_llist_exit(&node->n_lock_list);

	lock = list_head(&destroy_list);
	while (lock) {
		nxtl = list_next(&destroy_list, lock);
		list_remove(&destroy_list, lock);
		smb_lock_destroy(lock);
		lock = nxtl;
	}

	list_destroy(&destroy_list);
}

void
smb_lock_range_error(smb_request_t *sr, uint32_t status32)
{
	uint16_t errcode;

	if (status32 == NT_STATUS_CANCELLED)
		errcode = ERROR_OPERATION_ABORTED;
	else
		errcode = ERRlock;

	smbsr_error(sr, status32, ERRDOS, errcode);
}

/*
 * smb_range_check()
 *
 * Perform range checking.  First check for internal CIFS range conflicts
 * and then check for external conflicts, for example, with NFS or local
 * access.
 *
 * If nbmand is enabled, this function must be called from within an nbmand
 * critical region.
 */

DWORD
smb_range_check(smb_request_t *sr, smb_node_t *node, uint64_t start,
    uint64_t length, boolean_t will_write)
{
	smb_error_t smberr;
	int svmand;
	int nbl_op;
	int rc;

	SMB_NODE_VALID(node);

	ASSERT(smb_node_in_crit(node));

	if (smb_node_is_dir(node))
		return (NT_STATUS_SUCCESS);

	rc = smb_lock_range_access(sr, node, start, length, will_write);
	if (rc)
		return (NT_STATUS_FILE_LOCK_CONFLICT);

	if ((rc = nbl_svmand(node->vp, kcred, &svmand)) != 0) {
		smbsr_map_errno(rc, &smberr);
		return (smberr.status);
	}

	nbl_op = (will_write) ? NBL_WRITE : NBL_READ;

	if (nbl_lock_conflict(node->vp, nbl_op, start, length, svmand, &smb_ct))
		return (NT_STATUS_FILE_LOCK_CONFLICT);

	return (NT_STATUS_SUCCESS);
}

/*
 * smb_lock_posix_unlock
 *
 * Checks whether the range being unlocked overlaps other locks and
 * repeatedly calls smb_is_range_unlocked on a sliding basis so that only
 * the portions of the range that are not covered by other locks are
 * unlocked at the POSIX (file system) level.
 */
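/*
 * For example, unlocking the range 0..99 while another lock on the same
 * file covers bytes 40..59: the first smb_is_range_unlocked call reports
 * that 0..39 can be unlocked (new_mark = 40), the second reports that
 * 40..59 cannot and slides the start to 60 (new_mark = 60), and the third
 * unlocks the remaining 60..99 (new_mark = 0).  Bytes 40..59 stay locked
 * at the POSIX level because another lock still covers them.
 */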
static void
smb_lock_posix_unlock(smb_node_t *node, smb_lock_t *lock, cred_t *cr)
{
	uint64_t	new_mark;
	uint64_t	unlock_start;
	uint64_t	unlock_end;
	smb_lock_t	new_unlock;
	smb_llist_t	*llist;
	boolean_t	can_unlock;

	new_mark = 0;
	unlock_start = lock->l_start;
	unlock_end = unlock_start + lock->l_length;
	llist = &node->n_lock_list;

	for (;;) {
		can_unlock = smb_is_range_unlocked(unlock_start, unlock_end,
		    lock->l_file->f_uniqid, llist, &new_mark);
		if (can_unlock) {
			if (new_mark) {
				new_unlock = *lock;
				new_unlock.l_start = unlock_start;
				new_unlock.l_length = new_mark - unlock_start;
				(void) smb_fsop_frlock(node, &new_unlock,
				    B_TRUE, cr);
				unlock_start = new_mark;
			} else {
				new_unlock = *lock;
				new_unlock.l_start = unlock_start;
				new_unlock.l_length = unlock_end - unlock_start;
				(void) smb_fsop_frlock(node, &new_unlock,
				    B_TRUE, cr);
				break;
			}
		} else if (new_mark) {
			unlock_start = new_mark;
		} else {
			break;
		}
	}
}

/*
 * smb_lock_range_overlap
 *
 * Checks whether the lock range (start, length) overlaps the range in the
 * lock structure.
 *
 * Zero-length byte range locks actually affect no single byte of the stream,
 * meaning they can still be accessed even with such locks in place. However,
 * they do conflict with other ranges in the following manner:
 *  conflict will only exist if the positive-length range contains the
 *  zero-length range's offset but doesn't start at it
 *
 * return values:
 *	0 - Lock range doesn't overlap
 *	1 - Lock range overlaps.
 */
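/*
 * For example, a lock covering bytes 10..29 conflicts with a zero-length
 * lock at offset 15 (the offset falls strictly inside the range), but not
 * with zero-length locks at offsets 10 or 30.
 */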

#define	RANGE_NO_OVERLAP	0
#define	RANGE_OVERLAP		1

static int
smb_lock_range_overlap(struct smb_lock *lock, uint64_t start, uint64_t length)
{
	if (length == 0) {
		if ((lock->l_start < start) &&
		    ((lock->l_start + lock->l_length) > start))
			return (RANGE_OVERLAP);

		return (RANGE_NO_OVERLAP);
	}

	/* The following test is intended to catch roll over locks. */
	if ((start == lock->l_start) && (length == lock->l_length))
		return (RANGE_OVERLAP);

	if (start < lock->l_start) {
		if (start + length > lock->l_start)
			return (RANGE_OVERLAP);
	} else if (start < lock->l_start + lock->l_length)
		return (RANGE_OVERLAP);

	return (RANGE_NO_OVERLAP);
}

/*
 * smb_lock_range_lckrules
 *
 * Lock range rules:
 *	1. Overlapping read locks are allowed if the
 *	   current locks in the region are only read locks,
 *	   irrespective of the pid of the smb client issuing
 *	   the lock request.
 *
 *	2. A read lock in the overlapped region of a write lock
 *	   is allowed if the previous lock was taken by the
 *	   same pid and connection.
 *
 * return status:
 *	NT_STATUS_SUCCESS - Input lock range conforms to the lock rules.
 *	NT_STATUS_LOCK_NOT_GRANTED - Input lock conflicts with the lock rules.
 *	NT_STATUS_CANCELLED - Error in processing lock rules
 */
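/*
 * For example, read locks over bytes 0..9 and 5..14 may coexist even when
 * they are requested by different client pids, but a second write lock (or
 * a read lock from a different pid/connection) overlapping an existing
 * write lock is refused with NT_STATUS_LOCK_NOT_GRANTED.
 */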
static uint32_t
smb_lock_range_lckrules(
    smb_request_t	*sr,
    smb_ofile_t		*file,
    smb_node_t		*node,
    smb_lock_t		*dlock,
    smb_lock_t		**clockp)
{
	smb_lock_t	*lock;
	uint32_t	status = NT_STATUS_SUCCESS;

	/* Check if file is closed */
	if (!smb_ofile_is_open(file)) {
		return (NT_STATUS_RANGE_NOT_LOCKED);
	}

	/* Caller must hold lock for node->n_lock_list */
	for (lock = smb_llist_head(&node->n_lock_list);
	    lock != NULL;
	    lock = smb_llist_next(&node->n_lock_list, lock)) {

		if (!smb_lock_range_overlap(lock, dlock->l_start,
		    dlock->l_length))
			continue;

		/*
		 * Check whether the lock in the overlapping record
		 * is only a read lock.  Read locks may overlap,
		 * irrespective of pids.
		 */
		if ((lock->l_type == SMB_LOCK_TYPE_READONLY) &&
		    (dlock->l_type == SMB_LOCK_TYPE_READONLY)) {
			continue;
		}

		/*
		 * When a read lock overlaps a write lock, check
		 * whether it is allowed.
		 */
		if ((dlock->l_type == SMB_LOCK_TYPE_READONLY) &&
		    !(lock->l_type == SMB_LOCK_TYPE_READONLY)) {
			if (lock->l_file == sr->fid_ofile &&
			    lock->l_session_kid == sr->session->s_kid &&
			    lock->l_pid == sr->smb_pid &&
			    lock->l_uid == sr->smb_uid) {
				continue;
			}
		}

		/* Conflict in overlapping lock element */
		*clockp = lock;
		status = NT_STATUS_LOCK_NOT_GRANTED;
		break;
	}

	return (status);
}

/*
 * smb_lock_wait
 *
 * Wait operation for an overlapping SMB lock to be released.  Caller must
 * hold the write lock for node->n_lock_list so that the set of active locks
 * can't change unexpectedly.  The lock for node->n_lock_list will be
 * released within this function during the sleep, after the lock dependency
 * has been recorded.
 *
 * return value
 *
 *	0	The request was canceled.
 *	-1	The timeout was reached.
 *	>0	Condition met.
 */
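/*
 * The caller, smb_lock_range, treats a return of 0 as cancellation of the
 * request and a return of -1 as expiry of the timeout, in which case it
 * makes one final non-blocking pass over the lock rules before giving up.
 */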
static clock_t
smb_lock_wait(smb_request_t *sr, smb_lock_t *b_lock, smb_lock_t *c_lock)
{
	clock_t		rc;

	ASSERT(sr->sr_awaiting == NULL);

	mutex_enter(&sr->sr_mutex);

	switch (sr->sr_state) {
	case SMB_REQ_STATE_ACTIVE:
		/*
		 * Wait until the conflicting lock is released or
		 * the timeout expires.
		 */
		sr->sr_state = SMB_REQ_STATE_WAITING_LOCK;
		sr->sr_awaiting = c_lock;
		mutex_exit(&sr->sr_mutex);

		mutex_enter(&c_lock->l_mutex);
		/*
		 * The conflict list (l_conflict_list) for a lock contains
		 * all the locks that are blocked by and in conflict with
		 * that lock.  Add the new lock to the conflict list for the
		 * active lock.
		 *
		 * l_conflict_list is currently a fancy way of representing
		 * the references/dependencies on a lock.  It could be
		 * replaced with a reference count but this approach
		 * has the advantage that MDB can display the lock
		 * dependencies at any point in time.  In the future
		 * we should be able to leverage the list to implement
		 * an asynchronous locking model.
		 *
		 * l_blocked_by is the reverse of the conflict list.  It
		 * points to the lock that the new lock conflicts with.
		 * As currently implemented this value is purely for
		 * debug purposes -- there are windows of time when
		 * l_blocked_by may be non-NULL even though there is no
		 * conflict list.
		 */
		b_lock->l_blocked_by = c_lock;
		smb_slist_insert_tail(&c_lock->l_conflict_list, b_lock);
		smb_llist_exit(&c_lock->l_file->f_node->n_lock_list);

		if (SMB_LOCK_INDEFINITE_WAIT(b_lock)) {
			cv_wait(&c_lock->l_cv, &c_lock->l_mutex);
			rc = 1;	/* condition met; not a timeout */
		} else {
			rc = cv_timedwait(&c_lock->l_cv,
			    &c_lock->l_mutex, b_lock->l_end_time);
		}

		mutex_exit(&c_lock->l_mutex);

		smb_llist_enter(&c_lock->l_file->f_node->n_lock_list,
		    RW_WRITER);
		smb_slist_remove(&c_lock->l_conflict_list, b_lock);

		mutex_enter(&sr->sr_mutex);
		sr->sr_awaiting = NULL;
		if (sr->sr_state == SMB_REQ_STATE_CANCELED) {
			rc = 0;
		} else {
			sr->sr_state = SMB_REQ_STATE_ACTIVE;
		}
		break;

	default:
		ASSERT(sr->sr_state == SMB_REQ_STATE_CANCELED);
		rc = 0;
		break;
	}
	mutex_exit(&sr->sr_mutex);

	return (rc);
}

/*
 * smb_lock_range_ulckrules
 *
 *	1. An unlock must exactly match the start and length of an
 *	   existing lock.  Because overlapping ranges are allowed when
 *	   locking, an exact match is the only precise way of locating
 *	   the lock entity in the node lock list.
 *
 *	2. The unlock fails if no corresponding lock exists.
 *
 * Return values
 *
 *	NT_STATUS_SUCCESS		Unlock request matches the lock record
 *					pointed to by the 'nodelock' lock
 *					structure.
 *
 *	NT_STATUS_RANGE_NOT_LOCKED	Unlock request doesn't match any
 *					lock record in the node lock list, or
 *					error in unlock range processing.
 */
static uint32_t
smb_lock_range_ulckrules(
    smb_request_t	*sr,
    smb_node_t		*node,
    uint64_t		start,
    uint64_t		length,
    smb_lock_t		**nodelock)
{
	smb_lock_t	*lock;
	uint32_t	status = NT_STATUS_RANGE_NOT_LOCKED;

	/* Caller must hold lock for node->n_lock_list */
	for (lock = smb_llist_head(&node->n_lock_list);
	    lock != NULL;
	    lock = smb_llist_next(&node->n_lock_list, lock)) {

		if ((start == lock->l_start) &&
		    (length == lock->l_length) &&
		    lock->l_file == sr->fid_ofile &&
		    lock->l_session_kid == sr->session->s_kid &&
		    lock->l_pid == sr->smb_pid &&
		    lock->l_uid == sr->smb_uid) {
			*nodelock = lock;
			status = NT_STATUS_SUCCESS;
			break;
		}
	}

	return (status);
}

static smb_lock_t *
smb_lock_create(
    smb_request_t *sr,
    uint64_t start,
    uint64_t length,
    uint32_t locktype,
    uint32_t timeout)
{
	smb_lock_t *lock;

	ASSERT(locktype == SMB_LOCK_TYPE_READWRITE ||
	    locktype == SMB_LOCK_TYPE_READONLY);

	lock = kmem_zalloc(sizeof (smb_lock_t), KM_SLEEP);
	lock->l_magic = SMB_LOCK_MAGIC;
	lock->l_sr = sr; /* Invalid after lock is active */
	lock->l_session_kid = sr->session->s_kid;
	lock->l_session = sr->session;
	lock->l_file = sr->fid_ofile;
	lock->l_uid = sr->smb_uid;
	lock->l_pid = sr->smb_pid;
	lock->l_type = locktype;
	lock->l_start = start;
	lock->l_length = length;
	/*
	 * Calculate the absolute end time so that we can use it
	 * in cv_timedwait.
	 */
	lock->l_end_time = ddi_get_lbolt() + MSEC_TO_TICK(timeout);
	if (timeout == UINT_MAX)
		lock->l_flags |= SMB_LOCK_FLAG_INDEFINITE;

	mutex_init(&lock->l_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&lock->l_cv, NULL, CV_DEFAULT, NULL);
	smb_slist_constructor(&lock->l_conflict_list, sizeof (smb_lock_t),
	    offsetof(smb_lock_t, l_conflict_lnd));

	return (lock);
}

static void
smb_lock_free(smb_lock_t *lock)
{
	smb_slist_destructor(&lock->l_conflict_list);
	cv_destroy(&lock->l_cv);
	mutex_destroy(&lock->l_mutex);

	kmem_free(lock, sizeof (smb_lock_t));
}

/*
 * smb_lock_destroy
 *
 * Caller must hold node->n_lock_list
 */
static void
smb_lock_destroy(smb_lock_t *lock)
{
	/*
	 * Caller must hold node->n_lock_list lock.
	 */
	mutex_enter(&lock->l_mutex);
	cv_broadcast(&lock->l_cv);
	mutex_exit(&lock->l_mutex);

	/*
	 * The cv_broadcast above should wake up any locks that previously
	 * had conflicts with this lock.  Wait for the locking threads
	 * to remove their references to this lock.
	 */
	smb_slist_wait_for_empty(&lock->l_conflict_list);

	smb_lock_free(lock);
}

/*
 * smb_is_range_unlocked
 *
 * Checks if the current unlock byte range request overlaps another lock.
 * This function is used to determine where POSIX unlocks should be
 * applied.
 *
 * The return code and the value of new_mark must be interpreted as
 * follows:
 *
 * B_TRUE and (new_mark == 0):
 *   This is the last or only lock left to be unlocked
 *
 * B_TRUE and (new_mark > 0):
 *   The range from start to new_mark can be unlocked
 *
 * B_FALSE and (new_mark == 0):
 *   The unlock can't be performed and we are done
 *
 * B_FALSE and (new_mark > 0):
 *   The range from start to new_mark can't be unlocked
 *   Start should be reset to new_mark for the next pass
 */

static boolean_t
smb_is_range_unlocked(uint64_t start, uint64_t end, uint32_t uniqid,
    smb_llist_t *llist_head, uint64_t *new_mark)
{
	struct smb_lock *lk = NULL;
	uint64_t low_water_mark = MAXOFFSET_T;
	uint64_t lk_start;
	uint64_t lk_end;

	*new_mark = 0;
	lk = smb_llist_head(llist_head);
	while (lk) {
		if (lk->l_length == 0) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}

		if (lk->l_file->f_uniqid != uniqid) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}

		lk_end = lk->l_start + lk->l_length - 1;
		lk_start = lk->l_start;

		/*
		 * There is no overlap for the first two cases;
		 * check the next node.
		 */
		if (lk_end < start) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}
		if (lk_start > end) {
			lk = smb_llist_next(llist_head, lk);
			continue;
		}

		/* this range is completely locked */
		if ((lk_start <= start) && (lk_end >= end)) {
			return (B_FALSE);
		}

		/* the first part of this range is locked */
		if ((start >= lk_start) && (start <= lk_end)) {
			if (end > lk_end)
				*new_mark = lk_end + 1;
			return (B_FALSE);
		}

		/* lock starts inside the range; the piece before it is free */
		if ((lk_start >= start) && (lk_start <= end)) {
			if (low_water_mark > lk_start)
				low_water_mark = lk_start;
		}

		lk = smb_llist_next(llist_head, lk);
	}

	if (low_water_mark != MAXOFFSET_T) {
		*new_mark = low_water_mark;
		return (B_TRUE);
	}
	/* the range is completely unlocked */
	return (B_TRUE);
}