/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Emulex.  All rights reserved.
 * Use is subject to License terms.
 */

#include <emlxs.h>

EMLXS_MSG_DEF(EMLXS_MEM_C);

/*
 * emlxs_mem_alloc_buffer
 *
 * This routine allocates iocb/data buffer space and sets up the
 * buffers for all rings on the specified board. The data buffers
 * can be posted to a ring with the fc_post_buffer routine. The
 * iocb buffers are used to make a temporary copy of the response
 * ring iocbs.
 *
 * Returns 1 on success; returns 0 if there is not enough memory.
 */
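
/*
 * A note on the pool mechanics used throughout this file (an
 * illustrative sketch, not driver code): each pool is kept as a LIFO
 * freelist in which the first sizeof (void *) bytes of every free
 * block hold the forward pointer to the next free block, roughly:
 *
 *	struct free_blk {
 *		struct free_blk *next;	// overlays the object while free
 *		// ... rest of NODELIST/IOCBQ/MAILBOXQ/MATCHMAP ...
 *	};
 *
 * fc_memget_ptr is the list head and fc_memget_end the tail; the DMA
 * pools (MATCHMAP based) chain through fc_mptr, which overlays the
 * same first pointer slot.
 */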


extern int32_t
emlxs_mem_alloc_buffer(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg;
	MBUF_INFO *buf_info;
	uint8_t *bp;
	uint8_t *oldbp;
	MEMSEG *mp;
	MATCHMAP *matp;
	NODELIST *ndlp;
	IOCBQ *iocbq;
	MAILBOXQ *mbox;
	MBUF_INFO bufinfo;
	int32_t i;
	RING *fcp_rp;
	RING *ip_rp;
	RING *els_rp;
	RING *ct_rp;
	uint32_t total_iotags;
#ifdef EMLXS_SPARC
	int32_t j;
	ULP_BDE64 *p_bpl;
	ULP_BDE64 *v_bpl;
#endif	/* EMLXS_SPARC */

	buf_info = &bufinfo;
	cfg = &CFG;

	mutex_enter(&EMLXS_MEMGET_LOCK);

	/*
	 * Allocate and Initialize MEM_NLP (0)
	 */
	mp = &hba->memseg[MEM_NLP];
	mp->fc_memsize = sizeof (NODELIST);
	mp->fc_numblks = (int16_t)hba->max_nodes + 2;
	mp->fc_total_memsize = mp->fc_memsize * mp->fc_numblks;
	mp->fc_memstart_virt = kmem_zalloc(mp->fc_total_memsize, KM_SLEEP);
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_memflag = 0;
	mp->fc_lowmem = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;

	if (mp->fc_memstart_virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "NLP memory pool.");

		return (0);
	}
	bzero(mp->fc_memstart_virt, mp->fc_total_memsize);
	ndlp = (NODELIST *)mp->fc_memstart_virt;

	/*
	 * Link buffer into beginning of list. The first pointer
	 * in each buffer is a forward pointer to the next buffer.
	 */
	for (i = 0; i < mp->fc_numblks; i++, ndlp++) {
		ndlp->flag |= NODE_POOL_ALLOCATED;

		oldbp = mp->fc_memget_ptr;
		bp = (uint8_t *)ndlp;
		if (oldbp == NULL) {
			mp->fc_memget_end = bp;
		}
		mp->fc_memget_ptr = bp;
		*((uint8_t **)bp) = oldbp;
	}
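
	/*
	 * Note: the first block linked above (ndlp[0]) ends up at the
	 * tail of the LIFO list, which is why fc_memget_end is only set
	 * on the very first iteration, while the list is still empty.
	 */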


	/*
	 * Allocate and Initialize MEM_IOCB (1)
	 */
	mp = &hba->memseg[MEM_IOCB];
	mp->fc_memsize = sizeof (IOCBQ);
	mp->fc_numblks = (uint16_t)cfg[CFG_NUM_IOCBS].current;
	mp->fc_total_memsize = mp->fc_memsize * mp->fc_numblks;
	mp->fc_memstart_virt = kmem_zalloc(mp->fc_total_memsize, KM_SLEEP);
	mp->fc_lowmem = (mp->fc_numblks >> 4);
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;
	mp->fc_memflag = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;

	if (mp->fc_memstart_virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "IOCB memory pool.");

		return (0);
	}
	bzero(mp->fc_memstart_virt, mp->fc_total_memsize);
	iocbq = (IOCBQ *)mp->fc_memstart_virt;

	/*
	 * Link buffer into beginning of list. The first pointer
	 * in each buffer is a forward pointer to the next buffer.
	 */
	for (i = 0; i < mp->fc_numblks; i++, iocbq++) {
		iocbq->flag |= IOCB_POOL_ALLOCATED;

		oldbp = mp->fc_memget_ptr;
		bp = (uint8_t *)iocbq;
		if (oldbp == NULL) {
			mp->fc_memget_end = bp;
		}
		mp->fc_memget_ptr = bp;
		*((uint8_t **)bp) = oldbp;
	}

	/*
	 * Allocate and Initialize MEM_MBOX (2)
	 */
	mp = &hba->memseg[MEM_MBOX];
	mp->fc_memsize = sizeof (MAILBOXQ);
	mp->fc_numblks = (int16_t)hba->max_nodes + 32;
	mp->fc_total_memsize = mp->fc_memsize * mp->fc_numblks;
	mp->fc_memstart_virt = kmem_zalloc(mp->fc_total_memsize, KM_SLEEP);
	mp->fc_lowmem = (mp->fc_numblks >> 3);
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;
	mp->fc_memflag = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;

	if (mp->fc_memstart_virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "MBOX memory pool.");

		return (0);
	}
	bzero(mp->fc_memstart_virt, mp->fc_total_memsize);
	mbox = (MAILBOXQ *)mp->fc_memstart_virt;

	/*
	 * Link buffer into beginning of list. The first pointer
	 * in each buffer is a forward pointer to the next buffer.
	 */
	for (i = 0; i < mp->fc_numblks; i++, mbox++) {
		mbox->flag |= MBQ_POOL_ALLOCATED;

		oldbp = mp->fc_memget_ptr;
		bp = (uint8_t *)mbox;
		if (oldbp == NULL) {
			mp->fc_memget_end = bp;
		}
		mp->fc_memget_ptr = bp;
		*((uint8_t **)bp) = oldbp;
	}

	/*
	 * Initialize fc_table
	 */
	fcp_rp = &hba->ring[FC_FCP_RING];
	ip_rp = &hba->ring[FC_IP_RING];
	els_rp = &hba->ring[FC_ELS_RING];
	ct_rp = &hba->ring[FC_CT_RING];

	fcp_rp->max_iotag = cfg[CFG_NUM_IOTAGS].current;
	ip_rp->max_iotag = hba->max_nodes;
	els_rp->max_iotag = hba->max_nodes;
	ct_rp->max_iotag = hba->max_nodes;

	/* Allocate the fc_table */
	total_iotags = fcp_rp->max_iotag + ip_rp->max_iotag +
	    els_rp->max_iotag + ct_rp->max_iotag;

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = total_iotags * sizeof (emlxs_buf_t *);
	buf_info->align = sizeof (void *);

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "fc_table buffer.");

		return (0);
	}
	hba->iotag_table = buf_info->virt;
	fcp_rp->fc_table = &hba->iotag_table[0];
	ip_rp->fc_table = &hba->iotag_table[fcp_rp->max_iotag];
	els_rp->fc_table =
	    &hba->iotag_table[fcp_rp->max_iotag + ip_rp->max_iotag];
	ct_rp->fc_table =
	    &hba->iotag_table[fcp_rp->max_iotag + ip_rp->max_iotag +
	    els_rp->max_iotag];
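
	/*
	 * Illustrative layout of the iotag table carved up above (sizes
	 * are taken from the driver config and hba->max_nodes):
	 *
	 *	iotag_table: [ FCP tags | IP tags | ELS tags | CT tags ]
	 *	               ^          ^         ^          ^
	 *	               fcp_rp     ip_rp     els_rp     ct_rp
	 *
	 * Each ring then indexes its own fc_table[0..max_iotag-1].
	 */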


#ifdef EMLXS_SPARC
	/*
	 * Allocate and Initialize FCP MEM_BPL's.
	 * This is for increased performance on sparc
	 */

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = fcp_rp->max_iotag * sizeof (MATCHMAP);
	buf_info->align = sizeof (void *);

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "FCP BPL table buffer.");

		return (0);
	}
	hba->fcp_bpl_table = buf_info->virt;
	bzero(hba->fcp_bpl_table, buf_info->size);

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = (fcp_rp->max_iotag * (3 * sizeof (ULP_BDE64)));
	buf_info->flags = FC_MBUF_DMA;
	buf_info->align = 32;

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "FCP BPL DMA buffers.");

		return (0);
	}
	bzero(buf_info->virt, buf_info->size);

	hba->fcp_bpl_mp.size = buf_info->size;
	hba->fcp_bpl_mp.virt = buf_info->virt;
	hba->fcp_bpl_mp.phys = buf_info->phys;
	hba->fcp_bpl_mp.data_handle = buf_info->data_handle;
	hba->fcp_bpl_mp.dma_handle = buf_info->dma_handle;
	hba->fcp_bpl_mp.tag = NULL;

	v_bpl = (ULP_BDE64 *)hba->fcp_bpl_mp.virt;
	p_bpl = (ULP_BDE64 *)hba->fcp_bpl_mp.phys;
	for (i = 0, j = 0; i < fcp_rp->max_iotag; i++, j += 3) {
		matp = &hba->fcp_bpl_table[i];

		matp->fc_mptr = NULL;
		matp->size = (3 * sizeof (ULP_BDE64));
		matp->virt = (uint8_t *)&v_bpl[j];
		matp->phys = (uint64_t)&p_bpl[j];
		matp->dma_handle = NULL;
		matp->data_handle = NULL;
		matp->tag = MEM_BPL;
		matp->flag |= MAP_TABLE_ALLOCATED;
	}
#endif /* EMLXS_SPARC */

	/*
	 * Allocate and Initialize MEM_BPL (3)
	 */

	mp = &hba->memseg[MEM_BPL];
	mp->fc_memsize = hba->mem_bpl_size;	/* Set during attach */
	mp->fc_numblks = (uint16_t)cfg[CFG_NUM_IOCBS].current;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = (mp->fc_numblks >> 4);
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer we need page alignment so we
		 * do not have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "BPL segment buffer.");

			return (0);
		}

		matp = (MATCHMAP *)buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "BPL DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer
		 * in each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}

		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_BPL;
		matp->flag |= MAP_POOL_ALLOCATED;
	}
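
	/*
	 * At this point every MATCHMAP on the MEM_BPL freelist tracks
	 * one DMA buffer: its virt/phys addresses plus the DDI handles
	 * needed to release it later in emlxs_mem_free_buffer(). The
	 * same pattern repeats for the unsolicited pools below.
	 */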


	/*
	 * These represent the unsolicited ELS buffers we preallocate.
	 */

	mp = &hba->memseg[MEM_BUF];
	mp->fc_memsize = MEM_BUF_SIZE;
	mp->fc_numblks = MEM_ELSBUF_COUNT + MEM_BUF_COUNT;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = 3;
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer we need page alignment so we
		 * do not have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "MEM_BUF Segment buffer.");

			return (0);
		}

		matp = (MATCHMAP *)buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "MEM_BUF DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer
		 * in each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}

		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_BUF;
		matp->flag |= MAP_POOL_ALLOCATED;
	}


	/*
	 * These represent the unsolicited IP buffers we preallocate.
	 */

	mp = &hba->memseg[MEM_IPBUF];
	mp->fc_memsize = MEM_IPBUF_SIZE;
	mp->fc_numblks = MEM_IPBUF_COUNT;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = 3;
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer we need page alignment so we
		 * do not have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "IP_BUF Segment buffer.");

			return (0);
		}

		matp = (MATCHMAP *)buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "IP_BUF DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer
		 * in each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}

		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_IPBUF;
		matp->flag |= MAP_POOL_ALLOCATED;
	}

	/*
	 * These represent the unsolicited CT buffers we preallocate.
	 */
	mp = &hba->memseg[MEM_CTBUF];
	mp->fc_memsize = MEM_CTBUF_SIZE;
	mp->fc_numblks = MEM_CTBUF_COUNT;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = 0;
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer we need page alignment so we
		 * do not have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "CT_BUF Segment buffer.");

			return (0);
		}

		matp = (MATCHMAP *)buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "CT_BUF DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer
		 * in each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}

		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_CTBUF;
		matp->flag |= MAP_POOL_ALLOCATED;
	}

#ifdef SFCT_SUPPORT

	/*
	 * These represent the unsolicited FCT buffers we preallocate.
	 */
	mp = &hba->memseg[MEM_FCTBUF];
	mp->fc_memsize = MEM_FCTBUF_SIZE;
	mp->fc_numblks = (hba->tgt_mode) ? MEM_FCTBUF_COUNT : 0;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = 0;
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer we need page alignment so we
		 * do not have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "FCT_BUF Segment buffer.");

			return (0);
		}

		matp = (MATCHMAP *)buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "FCT_BUF DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer
		 * in each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}

		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_FCTBUF;
		matp->flag |= MAP_POOL_ALLOCATED;
	}
#endif /* SFCT_SUPPORT */

	for (i = 0; i < FC_MAX_SEG; i++) {
		char *seg = "Unknown";

		switch (i) {
		case MEM_NLP:
			seg = "MEM_NLP";
			break;
		case MEM_IOCB:
			seg = "MEM_IOCB";
			break;
		case MEM_MBOX:
			seg = "MEM_MBOX";
			break;
		case MEM_BPL:
			seg = "MEM_BPL";
			break;
		case MEM_BUF:
			seg = "MEM_BUF";
			break;
		case MEM_IPBUF:
			seg = "MEM_IPBUF";
			break;
		case MEM_CTBUF:
			seg = "MEM_CTBUF";
			break;
#ifdef SFCT_SUPPORT
		case MEM_FCTBUF:
			seg = "MEM_FCTBUF";
			break;
#endif /* SFCT_SUPPORT */
		default:
			break;
		}

		mp = &hba->memseg[i];

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
		    "Segment: %s mp=%p size=%x count=%d flags=%x base=%p",
		    seg, mp, mp->fc_memsize, mp->fc_numblks, mp->fc_memflag,
		    mp->fc_memget_ptr);
	}

	mutex_exit(&EMLXS_MEMGET_LOCK);

	return (1);

}  /* emlxs_mem_alloc_buffer() */



/*
 * emlxs_mem_free_buffer
 *
 * This routine frees the iocb/data buffer space
 * and TGTM resources.
 */
extern int
emlxs_mem_free_buffer(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	int32_t j;
	uint8_t *bp;
	MEMSEG *mp;
	MATCHMAP *mm;
	RING *rp;
	IOCBQ *iocbq;
	IOCB *iocb;
	MAILBOXQ *mbox, *mbsave;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;
	emlxs_buf_t *sbp;
	fc_unsol_buf_t *ubp;
	RING *fcp_rp;
	RING *ip_rp;
	RING *els_rp;
	RING *ct_rp;
	uint32_t total_iotags;
	emlxs_ub_priv_t *ub_priv;

	buf_info = &bufinfo;

	/* Check for deferred pkt completion */
	if (hba->mbox_sbp) {
		sbp = (emlxs_buf_t *)hba->mbox_sbp;
		hba->mbox_sbp = 0;

		emlxs_pkt_complete(sbp, -1, 0, 1);
	}

	/* Check for deferred ub completion */
	if (hba->mbox_ubp) {
		ubp = (fc_unsol_buf_t *)hba->mbox_ubp;
		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
		port = ub_priv->port;
		hba->mbox_ubp = 0;

		emlxs_ub_callback(port, ubp);
	}

#ifdef NPIV_SUPPORT
	/* Special handle for vport PLOGI */
	if (hba->mbox_iocbq == (uint8_t *)1) {
		hba->mbox_iocbq = NULL;
	}
#endif /* NPIV_SUPPORT */

	/* Check for deferred iocb tx */
	if (hba->mbox_iocbq) {	/* iocb */
		iocbq = (IOCBQ *)hba->mbox_iocbq;
		hba->mbox_iocbq = 0;
		iocb = &iocbq->iocb;

		/* Set the error status of the iocb */
		iocb->ulpStatus = IOSTAT_LOCAL_REJECT;
		iocb->un.grsp.perr.statLocalError = IOERR_ABORT_REQUESTED;

		switch (iocb->ulpCommand) {
		case CMD_FCP_ICMND_CR:
		case CMD_FCP_ICMND_CX:
		case CMD_FCP_IREAD_CR:
		case CMD_FCP_IREAD_CX:
		case CMD_FCP_IWRITE_CR:
		case CMD_FCP_IWRITE_CX:
		case CMD_FCP_ICMND64_CR:
		case CMD_FCP_ICMND64_CX:
		case CMD_FCP_IREAD64_CR:
		case CMD_FCP_IREAD64_CX:
		case CMD_FCP_IWRITE64_CR:
		case CMD_FCP_IWRITE64_CX:
			rp = &hba->ring[FC_FCP_RING];
			emlxs_handle_fcp_event(hba, rp, iocbq);
			break;

		case CMD_ELS_REQUEST_CR:
		case CMD_ELS_REQUEST_CX:
		case CMD_XMIT_ELS_RSP_CX:
		case CMD_ELS_REQUEST64_CR:	/* This is the only one used */
						/* for deferred iocb tx */
		case CMD_ELS_REQUEST64_CX:
		case CMD_XMIT_ELS_RSP64_CX:
			rp = &hba->ring[FC_ELS_RING];
			(void) emlxs_els_handle_event(hba, rp, iocbq);
			break;

		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			rp = &hba->ring[FC_CT_RING];
			(void) emlxs_ct_handle_event(hba, rp, iocbq);
			break;

		default:
			rp = (RING *)iocbq->ring;

			if (rp) {
				if (rp->ringno == FC_ELS_RING) {
					(void) emlxs_mem_put(hba, MEM_ELSBUF,
					    (uint8_t *)iocbq->bp);
				} else if (rp->ringno == FC_CT_RING) {
					(void) emlxs_mem_put(hba, MEM_CTBUF,
					    (uint8_t *)iocbq->bp);
				} else if (rp->ringno == FC_IP_RING) {
					(void) emlxs_mem_put(hba, MEM_IPBUF,
					    (uint8_t *)iocbq->bp);
				}
#ifdef SFCT_SUPPORT
				else if (rp->ringno == FC_FCT_RING) {
					(void) emlxs_mem_put(hba, MEM_FCTBUF,
					    (uint8_t *)iocbq->bp);
				}
#endif /* SFCT_SUPPORT */

			} else if (iocbq->bp) {
				(void) emlxs_mem_put(hba, MEM_BUF,
				    (uint8_t *)iocbq->bp);
			}

			if (!iocbq->sbp) {
				(void) emlxs_mem_put(hba, MEM_IOCB,
				    (uint8_t *)iocbq);
			}
		}
	}

	/* free the mapped address match area for each ring */
	for (j = 0; j < hba->ring_count; j++) {
		rp = &hba->ring[j];

		/* Flush the ring */
		(void) emlxs_tx_ring_flush(hba, rp, 0);

		while (rp->fc_mpoff) {
			uint64_t addr;

			addr = 0;
			mm = (MATCHMAP *)(rp->fc_mpoff);

			if ((j == FC_ELS_RING) || (j == FC_CT_RING) ||
#ifdef SFCT_SUPPORT
			    (j == FC_FCT_RING) ||
#endif /* SFCT_SUPPORT */
			    (j == FC_IP_RING)) {
				addr = mm->phys;
			}

			if ((mm = emlxs_mem_get_vaddr(hba, rp, addr))) {
				if (j == FC_ELS_RING) {
					(void) emlxs_mem_put(hba, MEM_ELSBUF,
					    (uint8_t *)mm);
				} else if (j == FC_CT_RING) {
					(void) emlxs_mem_put(hba, MEM_CTBUF,
					    (uint8_t *)mm);
				} else if (j == FC_IP_RING) {
					(void) emlxs_mem_put(hba, MEM_IPBUF,
					    (uint8_t *)mm);
				}
#ifdef SFCT_SUPPORT
				else if (j == FC_FCT_RING) {
					(void) emlxs_mem_put(hba, MEM_FCTBUF,
					    (uint8_t *)mm);
				}
#endif /* SFCT_SUPPORT */

			}
		}
	}

#ifdef SLI3_SUPPORT
	if (hba->flag & FC_HBQ_ENABLED) {
		emlxs_hbq_free_all(hba, EMLXS_ELS_HBQ_ID);
		emlxs_hbq_free_all(hba, EMLXS_IP_HBQ_ID);
		emlxs_hbq_free_all(hba, EMLXS_CT_HBQ_ID);
#ifdef SFCT_SUPPORT
		if (hba->tgt_mode) {
			emlxs_hbq_free_all(hba, EMLXS_FCT_HBQ_ID);
		}
#endif /* SFCT_SUPPORT */

	}
#endif /* SLI3_SUPPORT */

	/* Free everything on mbox queue */
	mbox = (MAILBOXQ *)(hba->mbox_queue.q_first);
	while (mbox) {
		mbsave = mbox;
		mbox = (MAILBOXQ *)mbox->next;
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbsave);
	}
	hba->mbox_queue.q_first = NULL;
	hba->mbox_queue.q_last = NULL;
	hba->mbox_queue.q_cnt = 0;
	hba->mbox_queue_flag = 0;

	/* Free the nodes */
	for (j = 0; j < MAX_VPORTS; j++) {
		vport = &VPORT(j);
		if (vport->node_count) {
			emlxs_node_destroy_all(vport);
		}
	}

	/* Free memory associated with all buffers on get buffer pool */
	if (hba->iotag_table) {
		fcp_rp = &hba->ring[FC_FCP_RING];
		ip_rp = &hba->ring[FC_IP_RING];
		els_rp = &hba->ring[FC_ELS_RING];
		ct_rp = &hba->ring[FC_CT_RING];

		total_iotags = fcp_rp->max_iotag + ip_rp->max_iotag +
		    els_rp->max_iotag + ct_rp->max_iotag;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = total_iotags * sizeof (emlxs_buf_t *);
		buf_info->virt = hba->iotag_table;
		emlxs_mem_free(hba, buf_info);

		hba->iotag_table = 0;
	}
#ifdef EMLXS_SPARC
	if (hba->fcp_bpl_table) {
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = fcp_rp->max_iotag * sizeof (MATCHMAP);
		buf_info->virt = hba->fcp_bpl_table;
		emlxs_mem_free(hba, buf_info);

		hba->fcp_bpl_table = 0;
	}

	if (hba->fcp_bpl_mp.virt) {
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = hba->fcp_bpl_mp.size;
		buf_info->virt = hba->fcp_bpl_mp.virt;
		buf_info->phys = hba->fcp_bpl_mp.phys;
		buf_info->dma_handle = hba->fcp_bpl_mp.dma_handle;
		buf_info->data_handle = hba->fcp_bpl_mp.data_handle;
		buf_info->flags = FC_MBUF_DMA;
		emlxs_mem_free(hba, buf_info);

		bzero(&hba->fcp_bpl_mp, sizeof (MATCHMAP));
	}
#endif /* EMLXS_SPARC */

	/* Free the memory segments */
	for (j = 0; j < FC_MAX_SEG; j++) {
		mp = &hba->memseg[j];

		/* MEM_NLP, MEM_IOCB, MEM_MBOX */
		if (j < MEM_BPL) {
			if (mp->fc_memstart_virt) {
				kmem_free(mp->fc_memstart_virt,
				    mp->fc_total_memsize);
				bzero((char *)mp, sizeof (MEMSEG));
			}

			continue;
		}

		/*
		 * MEM_BPL, MEM_BUF, MEM_ELSBUF,
		 * MEM_IPBUF, MEM_CTBUF, MEM_FCTBUF
		 */
		/* Free memory associated with all buffers on get buffer pool */
		mutex_enter(&EMLXS_MEMGET_LOCK);
		while ((bp = mp->fc_memget_ptr) != NULL) {
			mp->fc_memget_ptr = *((uint8_t **)bp);
			mm = (MATCHMAP *)bp;

			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = mm->size;
			buf_info->virt = mm->virt;
			buf_info->phys = mm->phys;
			buf_info->dma_handle = mm->dma_handle;
			buf_info->data_handle = mm->data_handle;
			buf_info->flags = FC_MBUF_DMA;
			emlxs_mem_free(hba, buf_info);

			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = sizeof (MATCHMAP);
			buf_info->virt = (uint32_t *)mm;
			emlxs_mem_free(hba, buf_info);
		}
		mutex_exit(&EMLXS_MEMGET_LOCK);

		/* Free memory associated with all buffers on put buffer pool */
		mutex_enter(&EMLXS_MEMPUT_LOCK);
		while ((bp = mp->fc_memput_ptr) != NULL) {
			mp->fc_memput_ptr = *((uint8_t **)bp);
			mm = (MATCHMAP *)bp;

			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = mm->size;
			buf_info->virt = mm->virt;
			buf_info->phys = mm->phys;
			buf_info->dma_handle = mm->dma_handle;
			buf_info->data_handle = mm->data_handle;
			buf_info->flags = FC_MBUF_DMA;
			emlxs_mem_free(hba, buf_info);

			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = sizeof (MATCHMAP);
			buf_info->virt = (uint32_t *)mm;
			emlxs_mem_free(hba, buf_info);
		}
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		bzero((char *)mp, sizeof (MEMSEG));
	}

	return (0);

}  /* emlxs_mem_free_buffer() */


extern uint8_t *
emlxs_mem_buf_alloc(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint8_t *bp = NULL;
	MATCHMAP *matp = NULL;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;

	buf_info = &bufinfo;

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = sizeof (MATCHMAP);
	buf_info->align = sizeof (void *);

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "MEM_BUF_ALLOC buffer.");

		return (0);
	}

	matp = (MATCHMAP *)buf_info->virt;
	bzero(matp, sizeof (MATCHMAP));

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = MEM_BUF_SIZE;
	buf_info->flags = FC_MBUF_DMA;
	buf_info->align = 32;

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "MEM_BUF_ALLOC DMA buffer.");

		/* Free the matp object */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->virt = (uint32_t *)matp;
		emlxs_mem_free(hba, buf_info);

		return (0);
	}
	bp = (uint8_t *)buf_info->virt;
	bzero(bp, MEM_BUF_SIZE);

	matp->fc_mptr = NULL;
	matp->virt = buf_info->virt;
	matp->phys = buf_info->phys;
	matp->size = buf_info->size;
	matp->dma_handle = buf_info->dma_handle;
	matp->data_handle = buf_info->data_handle;
	matp->tag = MEM_BUF;
	matp->flag |= MAP_BUF_ALLOCATED;

	return ((uint8_t *)matp);

}  /* emlxs_mem_buf_alloc() */
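
/*
 * Minimal usage sketch for the pair above (an assumption about caller
 * context, not code taken from the driver):
 *
 *	MATCHMAP *mp = (MATCHMAP *)emlxs_mem_buf_alloc(hba);
 *	if (mp != NULL) {
 *		// ... use mp->virt / mp->phys for DMA ...
 *		(void) emlxs_mem_buf_free(hba, (uint8_t *)mp);
 *	}
 *
 * Buffers carrying MAP_BUF_ALLOCATED are released here directly rather
 * than being returned to a pool freelist (see emlxs_mem_put()).
 */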


extern uint8_t *
emlxs_mem_buf_free(emlxs_hba_t *hba, uint8_t *bp)
{
	MATCHMAP *matp;
	MBUF_INFO bufinfo;
	MBUF_INFO *buf_info;

	buf_info = &bufinfo;

	matp = (MATCHMAP *)bp;

	if (!(matp->flag & MAP_BUF_ALLOCATED)) {
		return (NULL);
	}

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = matp->size;
	buf_info->virt = matp->virt;
	buf_info->phys = matp->phys;
	buf_info->dma_handle = matp->dma_handle;
	buf_info->data_handle = matp->data_handle;
	buf_info->flags = FC_MBUF_DMA;
	emlxs_mem_free(hba, buf_info);

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = sizeof (MATCHMAP);
	buf_info->virt = (uint32_t *)matp;
	emlxs_mem_free(hba, buf_info);

	return (bp);

}  /* emlxs_mem_buf_free() */


/*
 * emlxs_mem_get
 *
 * This routine gets a free memory buffer from the pool.
 * seg identifies which buffer pool to use.
 * Returns the free buffer pointer, or NULL if none is available.
 */
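/*
 * Typical get/put pairing (an illustrative sketch only):
 *
 *	IOCBQ *iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB);
 *	if (iocbq == NULL) {
 *		// pool exhausted; caller must handle the shortage
 *	} else {
 *		// ... build and issue the iocb ...
 *		(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
 *	}
 */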
extern uint8_t *
emlxs_mem_get(emlxs_hba_t *hba, uint32_t arg)
{
	emlxs_port_t *port = &PPORT;
	MEMSEG *mp;
	uint8_t *bp = NULL;
	uint32_t seg = arg & MEM_SEG_MASK;
	MAILBOXQ *mbq;
	MATCHMAP *matp;
	IOCBQ *iocbq;
	NODELIST *node;
	uint8_t *base;
	uint8_t *end;

	/* range check on seg argument */
	if (seg >= FC_MAX_SEG) {
		return (NULL);
	}

	mp = &hba->memseg[seg];

	/* Check if memory segment destroyed! */
	if (mp->fc_memsize == 0) {
		return (NULL);
	}

	mutex_enter(&EMLXS_MEMGET_LOCK);

top:

	if (mp->fc_memget_ptr) {
		bp = mp->fc_memget_ptr;

		/* Checking if seg == MEM_MBOX, MEM_IOCB or MEM_NLP */
		/* Verify buffer is in this memory region */
		if (mp->fc_memstart_virt && mp->fc_total_memsize) {
			base = mp->fc_memstart_virt;
			end = mp->fc_memstart_virt + mp->fc_total_memsize;
			if (bp < base || bp >= end) {
				/* Invalidate the get list */
				mp->fc_memget_ptr = NULL;
				mp->fc_memget_end = NULL;
				mp->fc_memget_cnt = 0;

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_pool_error_msg,
				    "Corruption detected: seg=%x bp=%p "
				    "base=%p end=%p.", seg, bp, base, end);

				emlxs_ffstate_change(hba, FC_ERROR);

				mutex_exit(&EMLXS_MEMGET_LOCK);

				thread_create(NULL, 0, emlxs_shutdown_thread,
				    (char *)hba, 0, &p0, TS_RUN,
				    v.v_maxsyspri - 2);

				return (NULL);
			}
		}

		/*
		 * If a memory block exists, take it off freelist
		 * and return it to the user.
		 */
		if (mp->fc_memget_end == bp) {
			mp->fc_memget_ptr = NULL;
			mp->fc_memget_end = NULL;
			mp->fc_memget_cnt = 0;

		} else {
			/*
			 * Pointer to the next free buffer
			 */
			mp->fc_memget_ptr = *((uint8_t **)bp);
			mp->fc_memget_cnt--;
		}

		switch (seg) {
		case MEM_MBOX:
			bzero(bp, sizeof (MAILBOXQ));

			mbq = (MAILBOXQ *)bp;
			mbq->flag |= MBQ_POOL_ALLOCATED;
			break;

		case MEM_IOCB:
			bzero(bp, sizeof (IOCBQ));

			iocbq = (IOCBQ *)bp;
			iocbq->flag |= IOCB_POOL_ALLOCATED;
			break;

		case MEM_NLP:
			bzero(bp, sizeof (NODELIST));

			node = (NODELIST *)bp;
			node->flag |= NODE_POOL_ALLOCATED;
			break;

		case MEM_BPL:
		case MEM_BUF:	/* MEM_ELSBUF */
		case MEM_IPBUF:
		case MEM_CTBUF:
#ifdef SFCT_SUPPORT
		case MEM_FCTBUF:
#endif /* SFCT_SUPPORT */
		default:
			matp = (MATCHMAP *)bp;
			matp->fc_mptr = NULL;
			matp->flag |= MAP_POOL_ALLOCATED;
			break;
		}
	} else {
		mutex_enter(&EMLXS_MEMPUT_LOCK);
		if (mp->fc_memput_ptr) {
			/*
			 * Move buffer from memput to memget
			 */
			mp->fc_memget_ptr = mp->fc_memput_ptr;
			mp->fc_memget_end = mp->fc_memput_end;
			mp->fc_memget_cnt = mp->fc_memput_cnt;
			mp->fc_memput_ptr = NULL;
			mp->fc_memput_end = NULL;
			mp->fc_memput_cnt = 0;
			mutex_exit(&EMLXS_MEMPUT_LOCK);

			goto top;
		}
		mutex_exit(&EMLXS_MEMPUT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_alloc_failed_msg,
		    "Pool empty: seg=%x lowmem=%x free=%x", seg,
		    mp->fc_lowmem, mp->fc_memget_cnt);

		/* HBASTATS.memAllocErr++; */
	}
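
	/*
	 * Design note: the pool keeps separate get and put lists under
	 * separate locks so that emlxs_mem_put() never contends with
	 * emlxs_mem_get() until the get list runs dry; only then is the
	 * whole put list migrated over in one O(1) pointer swap above.
	 */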

	mutex_exit(&EMLXS_MEMGET_LOCK);

	return (bp);

}  /* emlxs_mem_get() */



extern uint8_t *
emlxs_mem_put(emlxs_hba_t *hba, uint32_t seg, uint8_t *bp)
{
	emlxs_port_t *port = &PPORT;
	MEMSEG *mp;
	uint8_t *oldbp;
	MATCHMAP *matp;
	IOCBQ *iocbq;
	MAILBOXQ *mbq;
	NODELIST *node;
	uint8_t *base;
	uint8_t *end;

	if (!bp) {
		return (NULL);
	}

	/* Check on seg argument */
	if (seg >= FC_MAX_SEG) {
		return (NULL);
	}

	mp = &hba->memseg[seg];

	switch (seg) {
	case MEM_MBOX:
		mbq = (MAILBOXQ *)bp;

		if (!(mbq->flag & MBQ_POOL_ALLOCATED)) {
			return (bp);
		}
		break;

	case MEM_IOCB:
		iocbq = (IOCBQ *)bp;

		/* Check to make sure the IOCB is pool allocated */
		if (!(iocbq->flag & IOCB_POOL_ALLOCATED)) {
			return (bp);
		}

		/* Any IOCBQ with a packet attached did not come */
		/* from our pool */
		if (iocbq->sbp) {
			return (bp);
		}
		break;

	case MEM_NLP:
		node = (NODELIST *)bp;

		/* Check to make sure the NODE is pool allocated */
		if (!(node->flag & NODE_POOL_ALLOCATED)) {
			return (bp);
		}
		break;

	case MEM_BPL:
	case MEM_BUF:	/* MEM_ELSBUF */
	case MEM_IPBUF:
	case MEM_CTBUF:
#ifdef SFCT_SUPPORT
	case MEM_FCTBUF:
#endif /* SFCT_SUPPORT */
	default:
		matp = (MATCHMAP *)bp;

		if (matp->flag & MAP_BUF_ALLOCATED) {
			return (emlxs_mem_buf_free(hba, bp));
		}

		if (matp->flag & MAP_TABLE_ALLOCATED) {
			return (bp);
		}

		/* Check to make sure the MATCHMAP is pool allocated */
		if (!(matp->flag & MAP_POOL_ALLOCATED)) {
			return (bp);
		}
		break;
	}

	/* Free the pool object */
	mutex_enter(&EMLXS_MEMPUT_LOCK);

	/* Check if memory segment destroyed! */
	if (mp->fc_memsize == 0) {
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return (NULL);
	}

	/* Check if buffer was just freed */
	if (mp->fc_memput_ptr == bp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "Freeing Free object: seg=%x bp=%p", seg, bp);

		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return (NULL);
	}

	/* Validate the buffer */

	/* Checking if seg == MEM_BUF, MEM_BPL, MEM_CTBUF, */
	/* MEM_IPBUF or MEM_FCTBUF */
	if (mp->fc_memflag & FC_MEM_DMA) {
		if (matp->tag != seg) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
			    "Corruption detected: seg=%x tag=%x bp=%p", seg,
			    matp->tag, bp);

			emlxs_ffstate_change(hba, FC_ERROR);

			mutex_exit(&EMLXS_MEMPUT_LOCK);

			thread_create(NULL, 0, emlxs_shutdown_thread,
			    (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);

			return (NULL);
		}
	}

	/* Checking (seg == MEM_MBOX || seg == MEM_IOCB || seg == MEM_NLP) */
	else if (mp->fc_memstart_virt && mp->fc_total_memsize) {
		base = mp->fc_memstart_virt;
		end = mp->fc_memstart_virt + mp->fc_total_memsize;
		if (bp < base || bp >= end) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
			    "Corruption detected: seg=%x bp=%p base=%p end=%p",
			    seg, bp, base, end);

			emlxs_ffstate_change(hba, FC_ERROR);

			mutex_exit(&EMLXS_MEMPUT_LOCK);

			thread_create(NULL, 0, emlxs_shutdown_thread,
			    (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);

			return (NULL);
		}
	}

	/* Release to the first place of the freelist */
	oldbp = mp->fc_memput_ptr;
	mp->fc_memput_ptr = bp;
	*((uint8_t **)bp) = oldbp;

	if (oldbp == NULL) {
		mp->fc_memput_end = bp;
		mp->fc_memput_cnt = 1;
	} else {
		mp->fc_memput_cnt++;
	}

	mutex_exit(&EMLXS_MEMPUT_LOCK);

	return (bp);

}  /* emlxs_mem_put() */


/*
 * Look up the virtual address of a posted buffer, given its mapped
 * (physical) address, and unlink it from the ring's posted list.
 */
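/*
 * The posted buffers form a singly linked list headed by rp->fc_mpoff
 * (tail rp->fc_mpon), chained through fc_mptr. A sketch of the unlink
 * performed below once a matching physical address is found:
 *
 *	prev->fc_mptr = mp->fc_mptr;	// or rp->fc_mpoff = mp->fc_mptr
 *	if (rp->fc_mpon == (uint8_t *)mp)
 *		rp->fc_mpon = (uint8_t *)prev;
 */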
extern MATCHMAP *
emlxs_mem_get_vaddr(emlxs_hba_t *hba, RING *rp, uint64_t mapbp)
{
	emlxs_port_t *port = &PPORT;
	MATCHMAP *prev;
	MATCHMAP *mp;

	switch (rp->ringno) {
	case FC_ELS_RING:
		mp = (MATCHMAP *)rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}

				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.ElsUbPosted--;

				return (mp);
			}

			prev = mp;
			mp = (MATCHMAP *)mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "ELS Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;

	case FC_CT_RING:
		mp = (MATCHMAP *)rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}

				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.CtUbPosted--;

				return (mp);
			}

			prev = mp;
			mp = (MATCHMAP *)mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "CT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;

	case FC_IP_RING:
		mp = (MATCHMAP *)rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}

				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.IpUbPosted--;

				return (mp);
			}

			prev = mp;
			mp = (MATCHMAP *)mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "IP Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;

#ifdef SFCT_SUPPORT
	case FC_FCT_RING:
		mp = (MATCHMAP *)rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}

				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.FctUbPosted--;

				return (mp);
			}

			prev = mp;
			mp = (MATCHMAP *)mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "FCT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;
#endif /* SFCT_SUPPORT */
	}

	return (0);

}  /* emlxs_mem_get_vaddr() */


/*
 * Given a buffer mp, return its mapped (physical) address through
 * haddr/laddr and save the buffer on the ring's posted list so the
 * address pair can be looked up later.
 */
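/*
 * Illustrative pairing with emlxs_mem_get_vaddr() (a sketch; the real
 * callers live in the unsolicited buffer posting paths):
 *
 *	uint32_t haddr, laddr;
 *	emlxs_mem_map_vaddr(hba, rp, mp, &haddr, &laddr);
 *	// haddr/laddr are then written into the buffer descriptor;
 *	// on completion the adapter hands back the mapped address
 *	// (mapbp below), which recovers the posted MATCHMAP:
 *	mp = emlxs_mem_get_vaddr(hba, rp, mapbp);
 */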
extern void
emlxs_mem_map_vaddr(emlxs_hba_t *hba, RING *rp, MATCHMAP *mp,
    uint32_t *haddr, uint32_t *laddr)
{
	switch (rp->ringno) {
	case FC_ELS_RING:
		/*
		 * Update slot fc_mpon points to then bump it
		 * fc_mpoff is pointer head of the list.
		 * fc_mpon is pointer tail of the list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
			    (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {

			/* return mapped address */
			*haddr = putPaddrHigh(mp->phys);
			/* return mapped address */
			*laddr = putPaddrLow(mp->phys);
		} else {
			/* return mapped address */
			*laddr = putPaddrLow(mp->phys);
		}

		HBASTATS.ElsUbPosted++;

		break;

	case FC_CT_RING:
		/*
		 * Update slot fc_mpon points to then bump it
		 * fc_mpoff is pointer head of the list.
		 * fc_mpon is pointer tail of the list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
			    (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = putPaddrHigh(mp->phys);
			/* return mapped address */
			*laddr = putPaddrLow(mp->phys);
		} else {
			/* return mapped address */
			*laddr = putPaddrLow(mp->phys);
		}

		HBASTATS.CtUbPosted++;

		break;


	case FC_IP_RING:
		/*
		 * Update slot fc_mpon points to then bump it
		 * fc_mpoff is pointer head of the list.
		 * fc_mpon is pointer tail of the list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
			    (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = putPaddrHigh(mp->phys);
			*laddr = putPaddrLow(mp->phys);
		} else {
			*laddr = putPaddrLow(mp->phys);
		}

		HBASTATS.IpUbPosted++;
		break;


#ifdef SFCT_SUPPORT
	case FC_FCT_RING:
		/*
		 * Update slot fc_mpon points to then bump it
		 * fc_mpoff is pointer head of the list.
		 * fc_mpon is pointer tail of the list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
			    (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = putPaddrHigh(mp->phys);
			/* return mapped address */
			*laddr = putPaddrLow(mp->phys);
		} else {
			/* return mapped address */
			*laddr = putPaddrLow(mp->phys);
		}

		HBASTATS.FctUbPosted++;
		break;
#endif /* SFCT_SUPPORT */
	}
}  /* emlxs_mem_map_vaddr() */


#ifdef SLI3_SUPPORT

uint32_t
emlxs_hbq_alloc(emlxs_hba_t *hba, uint32_t hbq_id)
{
	emlxs_port_t *port = &PPORT;
	HBQ_INIT_t *hbq;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;

	hbq = &hba->hbq_table[hbq_id];

	if (hbq->HBQ_host_buf.virt == 0) {
		buf_info = &bufinfo;

		/* Allocate the HBQ entry array as one 4K-aligned DMA buffer */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = hbq->HBQ_numEntries * sizeof (HBQE_t);
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 4096;

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
			    "Unable to alloc HBQ.");
			return (ENOMEM);
		}

		hbq->HBQ_host_buf.virt = (void *)buf_info->virt;
		hbq->HBQ_host_buf.phys = buf_info->phys;
		hbq->HBQ_host_buf.data_handle = buf_info->data_handle;
		hbq->HBQ_host_buf.dma_handle = buf_info->dma_handle;
		hbq->HBQ_host_buf.size = buf_info->size;
		hbq->HBQ_host_buf.tag = hbq_id;

		bzero((char *)hbq->HBQ_host_buf.virt, buf_info->size);
	}

	return (0);

}  /* emlxs_hbq_alloc() */


#endif /* SLI3_SUPPORT */