1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2009 Emulex.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 
28 #include <emlxs.h>
29 
30 EMLXS_MSG_DEF(EMLXS_MEM_C);
31 
32 
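/*
 * emlxs_mem_alloc_buffer
 *
 * Allocates the adapter's I/O tag table (fc_table) and prepares each
 * of the driver memory pools in hba->memseg.  On SPARC (SLI-3 only),
 * a dedicated FCP BPL pool and lookup table are also allocated.
 *
 * Returns 1 on success.  On failure, all partial allocations are
 * released via emlxs_mem_free_buffer() and 0 is returned.
 */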
33 extern int32_t
34 emlxs_mem_alloc_buffer(emlxs_hba_t *hba)
35 {
36 	emlxs_port_t *port = &PPORT;
37 	emlxs_config_t *cfg;
38 	MBUF_INFO *buf_info;
39 	MEMSEG *seg;
40 	MBUF_INFO bufinfo;
41 	int32_t i;
42 	int32_t cnt;
43 #ifdef EMLXS_SPARC
44 	MATCHMAP *mp;
45 	MATCHMAP **fcp_bpl_table;
46 #endif	/* EMLXS_SPARC */
47 
48 	buf_info = &bufinfo;
49 	cfg = &CFG;
50 
51 	bzero(hba->memseg, sizeof (hba->memseg));
52 
53 	/*
54 	 * Initialize fc_table
55 	 */
56 	cnt = cfg[CFG_NUM_IOTAGS].current;
57 	if (cnt) {
58 		hba->max_iotag = cnt;
59 	}
	/* iotag 0 is not used; iotags 1 through max_iotag-1 are used */
61 
62 	/* Allocate the fc_table */
63 	bzero(buf_info, sizeof (MBUF_INFO));
64 	buf_info->size = (hba->max_iotag * sizeof (emlxs_buf_t *));
65 
66 	(void) emlxs_mem_alloc(hba, buf_info);
67 	if (buf_info->virt == NULL) {
68 
69 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
70 		    "fc_table buffer.");
71 
72 		goto failed;
73 	}
74 	hba->fc_table = buf_info->virt;
75 	bzero(hba->fc_table, buf_info->size);
76 
77 #ifdef EMLXS_SPARC
78 	if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK)) {
	/*
	 * Allocate and initialize the FCP MEM_BPL table.
	 * This is done for increased performance on SPARC.
	 */
83 	bzero(buf_info, sizeof (MBUF_INFO));
84 	buf_info->size = hba->max_iotag * sizeof (MATCHMAP *);
85 
86 	(void) emlxs_mem_alloc(hba, buf_info);
87 	if (buf_info->virt == NULL) {
88 
89 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
90 		    "FCP BPL table buffer.");
91 
92 		goto failed;
93 	}
94 	hba->sli.sli3.fcp_bpl_table = buf_info->virt;
95 	bzero(hba->sli.sli3.fcp_bpl_table, buf_info->size);
96 
97 	/* Allocate a pool of BPLs for the FCP MEM_BPL table */
98 	seg = &hba->sli.sli3.fcp_bpl_seg;
99 	bzero(seg, sizeof (MEMSEG));
100 	(void) strcpy(seg->fc_label, "FCP BPL Pool");
101 	seg->fc_memtag	= MEM_BPL;
102 	seg->fc_memsize	= (3 * sizeof (ULP_BDE64));
103 	seg->fc_numblks	= hba->max_iotag;
104 	seg->fc_reserved = 0;
105 	seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
106 	seg->fc_memalign = 32;
107 
108 	if (emlxs_mem_pool_alloc(hba, seg) == NULL) {
109 		goto failed;
110 	}
111 
112 	/* Initialize the FCP MEM_BPL table */
	fcp_bpl_table = (MATCHMAP **)hba->sli.sli3.fcp_bpl_table;
	mp = (MATCHMAP *)seg->fc_memget_ptr;
115 	for (i = 0; i < seg->fc_numblks; i++) {
116 		mp->flag |= MAP_TABLE_ALLOCATED;
117 		*fcp_bpl_table = mp;
118 
119 		mp = (MATCHMAP *)mp->fc_mptr;
120 		fcp_bpl_table++;
121 	}
122 	}
123 #endif /* EMLXS_SPARC */
124 
125 	/* Prepare the memory pools */
126 	for (i = 0; i < FC_MAX_SEG; i++) {
127 		seg = &hba->memseg[i];
128 
129 		switch (i) {
130 		case MEM_NLP:
131 			(void) strcpy(seg->fc_label, "Node Pool");
132 			seg->fc_memtag	= MEM_NLP;
133 			seg->fc_memsize	= sizeof (NODELIST);
134 			seg->fc_numblks	= (int16_t)hba->max_nodes + 2;
135 			seg->fc_reserved = 0;
136 			seg->fc_memflag	= 0;
137 			break;
138 
139 		case MEM_IOCB:
140 			(void) strcpy(seg->fc_label, "IOCB Pool");
141 			seg->fc_memtag	= MEM_IOCB;
142 			seg->fc_memsize	= sizeof (IOCBQ);
143 			seg->fc_numblks	= (uint16_t)cfg[CFG_NUM_IOCBS].current;
144 			seg->fc_reserved = 0;
145 			seg->fc_memflag	= 0;
146 			break;
147 
148 		case MEM_MBOX:
149 			(void) strcpy(seg->fc_label, "MBOX Pool");
150 			seg->fc_memtag	= MEM_MBOX;
151 			seg->fc_memsize	= sizeof (MAILBOXQ);
152 			seg->fc_numblks	= (int16_t)hba->max_nodes + 32;
153 			seg->fc_reserved = 0;
154 			seg->fc_memflag	= 0;
155 			break;
156 
157 		case MEM_BPL:
158 			if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
159 				continue;
160 			}
161 			(void) strcpy(seg->fc_label, "BPL Pool");
162 			seg->fc_memtag	= MEM_BPL;
163 			seg->fc_memsize	= hba->sli.sli3.mem_bpl_size;
164 			seg->fc_numblks	= (int16_t)hba->max_iotag + 2;
165 			seg->fc_reserved = 0;
166 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
167 			seg->fc_memalign = 32;
168 			break;
169 
170 		case MEM_BUF:
171 			/* These are the unsolicited ELS buffers. */
172 			(void) strcpy(seg->fc_label, "BUF Pool");
173 			seg->fc_memtag	= MEM_BUF;
174 			seg->fc_memsize	= MEM_BUF_SIZE;
175 			seg->fc_numblks	= MEM_ELSBUF_COUNT + MEM_BUF_COUNT;
176 			seg->fc_reserved = 0;
177 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
178 			seg->fc_memalign = 32;
179 			break;
180 
181 		case MEM_IPBUF:
182 			/* These are the unsolicited IP buffers. */
183 			if (cfg[CFG_NETWORK_ON].current == 0) {
184 				continue;
185 			}
186 
187 			(void) strcpy(seg->fc_label, "IPBUF Pool");
188 			seg->fc_memtag	= MEM_IPBUF;
189 			seg->fc_memsize	= MEM_IPBUF_SIZE;
190 			seg->fc_numblks	= MEM_IPBUF_COUNT;
191 			seg->fc_reserved = 0;
192 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
193 			seg->fc_memalign = 32;
194 			break;
195 
196 		case MEM_CTBUF:
197 			/* These are the unsolicited CT buffers. */
198 			(void) strcpy(seg->fc_label, "CTBUF Pool");
199 			seg->fc_memtag	= MEM_CTBUF;
200 			seg->fc_memsize	= MEM_CTBUF_SIZE;
201 			seg->fc_numblks	= MEM_CTBUF_COUNT;
202 			seg->fc_reserved = 0;
203 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
204 			seg->fc_memalign = 32;
205 			break;
206 
207 		case MEM_FCTBUF:
208 #ifdef SFCT_SUPPORT
209 			/* These are the unsolicited FCT buffers. */
210 			if (hba->tgt_mode == 0) {
211 				continue;
212 			}
213 
214 			(void) strcpy(seg->fc_label, "FCTBUF Pool");
215 			seg->fc_memtag	= MEM_FCTBUF;
216 			seg->fc_memsize	= MEM_FCTBUF_SIZE;
217 			seg->fc_numblks	= MEM_FCTBUF_COUNT;
218 			seg->fc_reserved = 0;
219 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
220 			seg->fc_memalign = 32;
221 #endif /* SFCT_SUPPORT */
222 			break;
223 
224 		default:
225 			continue;
226 		}
227 
228 		if (seg->fc_memsize == 0) {
229 			continue;
230 		}
231 
232 		if (emlxs_mem_pool_alloc(hba, seg) == NULL) {
233 			goto failed;
234 		}
235 
236 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
237 		    "%s: seg=%p size=%x count=%d flags=%x base=%p",
238 		    seg->fc_label, seg, seg->fc_memsize, seg->fc_numblks,
239 		    seg->fc_memflag, seg->fc_memget_ptr);
240 	}
241 
242 	return (1);
243 
244 failed:
245 
246 	(void) emlxs_mem_free_buffer(hba);
247 	return (0);
248 
249 } /* emlxs_mem_alloc_buffer() */
250 
251 
/*
 * emlxs_mem_free_buffer
 *
 * This routine frees the IOCB/data buffer space and releases
 * the target mode (TGTM) resources.
 */
258 extern int
259 emlxs_mem_free_buffer(emlxs_hba_t *hba)
260 {
261 	emlxs_port_t *vport;
262 	int32_t j;
263 	MATCHMAP *mp;
264 	CHANNEL *cp;
265 	RING *rp;
266 	MBUF_INFO *buf_info;
267 	MBUF_INFO bufinfo;
268 
269 	buf_info = &bufinfo;
270 
271 	for (j = 0; j < hba->chan_count; j++) {
272 		cp = &hba->chan[j];
273 
		/* Flush the channel's transmit queue */
275 		(void) emlxs_tx_channel_flush(hba, cp, 0);
276 	}
277 
278 	if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK)) {
279 		/* free the mapped address match area for each ring */
280 		for (j = 0; j < MAX_RINGS; j++) {
281 			rp = &hba->sli.sli3.ring[j];
282 
283 			while (rp->fc_mpoff) {
284 				uint64_t addr;
285 
286 				addr = 0;
287 				mp = (MATCHMAP *)(rp->fc_mpoff);
288 
289 				if ((j == hba->channel_els) ||
290 				    (j == hba->channel_ct) ||
291 #ifdef SFCT_SUPPORT
292 				    (j == hba->CHANNEL_FCT) ||
293 #endif /* SFCT_SUPPORT */
294 				    (j == hba->channel_ip)) {
295 					addr = mp->phys;
296 				}
297 
298 				if ((mp = emlxs_mem_get_vaddr(hba, rp, addr))) {
299 					if (j == hba->channel_els) {
300 						(void) emlxs_mem_put(hba,
301 						    MEM_ELSBUF, (uint8_t *)mp);
302 					} else if (j == hba->channel_ct) {
303 						(void) emlxs_mem_put(hba,
304 						    MEM_CTBUF, (uint8_t *)mp);
305 					} else if (j == hba->channel_ip) {
306 						(void) emlxs_mem_put(hba,
307 						    MEM_IPBUF, (uint8_t *)mp);
308 					}
309 #ifdef SFCT_SUPPORT
310 					else if (j == hba->CHANNEL_FCT) {
311 						(void) emlxs_mem_put(hba,
312 						    MEM_FCTBUF, (uint8_t *)mp);
313 					}
314 #endif /* SFCT_SUPPORT */
315 
316 				}
317 			}
318 		}
319 	}
320 
321 	if (hba->flag & FC_HBQ_ENABLED) {
322 		emlxs_hbq_free_all(hba, EMLXS_ELS_HBQ_ID);
323 		emlxs_hbq_free_all(hba, EMLXS_IP_HBQ_ID);
324 		emlxs_hbq_free_all(hba, EMLXS_CT_HBQ_ID);
325 
326 		if (hba->tgt_mode) {
327 			emlxs_hbq_free_all(hba, EMLXS_FCT_HBQ_ID);
328 		}
329 	}
330 
331 	/* Free the nodes */
332 	for (j = 0; j < MAX_VPORTS; j++) {
333 		vport = &VPORT(j);
334 		if (vport->node_count) {
335 			emlxs_node_destroy_all(vport);
336 		}
337 	}
338 
339 	/* Make sure the mailbox queue is empty */
340 	emlxs_mb_flush(hba);
341 
	/* Free the fc_table */
343 	if (hba->fc_table) {
344 		bzero(buf_info, sizeof (MBUF_INFO));
345 		buf_info->size = hba->max_iotag * sizeof (emlxs_buf_t *);
346 		buf_info->virt = hba->fc_table;
347 		emlxs_mem_free(hba, buf_info);
348 		hba->fc_table = NULL;
349 	}
350 
351 #ifdef EMLXS_SPARC
352 	if (hba->sli.sli3.fcp_bpl_table) {
353 		bzero(buf_info, sizeof (MBUF_INFO));
354 		buf_info->size = hba->max_iotag * sizeof (MATCHMAP *);
355 		buf_info->virt = hba->sli.sli3.fcp_bpl_table;
356 		emlxs_mem_free(hba, buf_info);
357 		hba->sli.sli3.fcp_bpl_table = NULL;
358 	}
359 
360 	if (hba->sli.sli3.fcp_bpl_seg.fc_memsize) {
361 		emlxs_mem_pool_free(hba, &hba->sli.sli3.fcp_bpl_seg);
362 		bzero(&hba->sli.sli3.fcp_bpl_seg, sizeof (MEMSEG));
363 	}
364 #endif /* EMLXS_SPARC */
365 
366 	/* Free the memory segments */
367 	for (j = 0; j < FC_MAX_SEG; j++) {
368 		emlxs_mem_pool_free(hba, &hba->memseg[j]);
369 	}
370 
371 	return (0);
372 
373 } /* emlxs_mem_free_buffer() */
374 
375 
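/*
 * emlxs_mem_pool_alloc
 *
 * Allocates the backing memory for the memory segment described by
 * seg and builds its freelist.  DMA pools (FC_MBUF_DMA) allocate a
 * MATCHMAP descriptor plus a DMA buffer for every block; non-DMA
 * pools carve fc_numblks blocks out of a single kmem_zalloc() area.
 *
 * Returns seg on success, or NULL on failure after releasing any
 * partial allocations via emlxs_mem_pool_free().
 */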
376 extern MEMSEG *
377 emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg)
378 {
379 	emlxs_port_t *port = &PPORT;
380 	uint8_t *bp = NULL;
381 	MATCHMAP *mp = NULL;
382 	MBUF_INFO *buf_info;
383 	MBUF_INFO local_buf_info;
384 	uint32_t i;
385 
386 	buf_info = &local_buf_info;
387 
388 	mutex_enter(&EMLXS_MEMGET_LOCK);
389 	mutex_enter(&EMLXS_MEMPUT_LOCK);
390 
391 	/* Calculate total memory size */
392 	seg->fc_total_memsize = (seg->fc_memsize * seg->fc_numblks);
393 
394 	if (seg->fc_total_memsize == 0) {
395 		mutex_exit(&EMLXS_MEMPUT_LOCK);
396 		mutex_exit(&EMLXS_MEMGET_LOCK);
397 		return (NULL);
398 	}
399 
400 	if (!(seg->fc_memflag & FC_MBUF_DMA)) {
401 		goto vmem_pool;
402 	}
403 
404 /* dma_pool */
405 
406 	for (i = 0; i < seg->fc_numblks; i++) {
407 		bzero(buf_info, sizeof (MBUF_INFO));
408 		buf_info->size = sizeof (MATCHMAP);
409 		buf_info->align = sizeof (void *);
410 
411 		(void) emlxs_mem_alloc(hba, buf_info);
412 		if (buf_info->virt == NULL) {
413 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
414 			    "%s desc[%d]. size=%d", seg->fc_label, i,
415 			    buf_info->size);
416 
417 			goto failed;
418 		}
419 
420 		mp = (MATCHMAP *)buf_info->virt;
421 		bzero(mp, sizeof (MATCHMAP));
422 
423 		bzero(buf_info, sizeof (MBUF_INFO));
424 		buf_info->size  = seg->fc_memsize;
425 		buf_info->flags = seg->fc_memflag;
426 		buf_info->align = seg->fc_memalign;
427 
428 		(void) emlxs_mem_alloc(hba, buf_info);
429 		if (buf_info->virt == NULL) {
430 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
431 			    "%s buffer[%d]. size=%d", seg->fc_label, i,
432 			    buf_info->size);
433 
434 			/* Free the mp object */
435 			bzero(buf_info, sizeof (MBUF_INFO));
436 			buf_info->size = sizeof (MATCHMAP);
437 			buf_info->virt = (uint32_t *)mp;
438 			emlxs_mem_free(hba, buf_info);
439 
440 			goto failed;
441 		}
442 		bp = (uint8_t *)buf_info->virt;
443 		bzero(bp, seg->fc_memsize);
444 
445 		mp->virt = buf_info->virt;
446 		mp->phys = buf_info->phys;
447 		mp->size = buf_info->size;
448 		mp->dma_handle = buf_info->dma_handle;
449 		mp->data_handle = buf_info->data_handle;
450 		mp->tag = seg->fc_memtag;
451 		mp->segment = seg;
452 		mp->flag |= MAP_POOL_ALLOCATED;
453 
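		/*
		 * The freelist is threaded through the objects themselves:
		 * the first pointer-sized word of each free object holds
		 * the address of the next free object, and fc_memget_end
		 * tracks the current tail.
		 */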
454 		/* Add the buffer desc to the tail of the pool freelist */
455 		if (seg->fc_memget_end == NULL) {
456 			seg->fc_memget_ptr = (uint8_t *)mp;
457 			seg->fc_memget_cnt = 1;
458 		} else {
459 			*((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)mp;
460 			seg->fc_memget_cnt++;
461 		}
462 		seg->fc_memget_end = (uint8_t *)mp;
463 	}
464 
465 	mutex_exit(&EMLXS_MEMPUT_LOCK);
466 	mutex_exit(&EMLXS_MEMGET_LOCK);
467 	return (seg);
468 
469 vmem_pool:
470 
471 	mutex_exit(&EMLXS_MEMPUT_LOCK);
472 	mutex_exit(&EMLXS_MEMGET_LOCK);
473 
474 	seg->fc_memstart_virt = kmem_zalloc(seg->fc_total_memsize, KM_SLEEP);
475 
476 	mutex_enter(&EMLXS_MEMGET_LOCK);
477 	mutex_enter(&EMLXS_MEMPUT_LOCK);
478 
479 	if (seg->fc_memstart_virt == NULL) {
480 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
481 		    "%s base. size=%d", seg->fc_label,
482 		    seg->fc_total_memsize);
483 
484 		goto failed;
485 	}
486 
487 	bp = (uint8_t *)seg->fc_memstart_virt;
488 	for (i = 0; i < seg->fc_numblks; i++) {
489 
490 		/* Add the buffer to the tail of the pool freelist */
491 		if (seg->fc_memget_end == NULL) {
492 			seg->fc_memget_ptr = (uint8_t *)bp;
493 			seg->fc_memget_cnt = 1;
494 		} else {
495 			*((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)bp;
496 			seg->fc_memget_cnt++;
497 		}
498 		seg->fc_memget_end = (uint8_t *)bp;
499 
500 		bp += seg->fc_memsize;
501 	}
502 
503 	mutex_exit(&EMLXS_MEMPUT_LOCK);
504 	mutex_exit(&EMLXS_MEMGET_LOCK);
505 	return (seg);
506 
507 failed:
508 
509 	mutex_exit(&EMLXS_MEMPUT_LOCK);
510 	mutex_exit(&EMLXS_MEMGET_LOCK);
511 	emlxs_mem_pool_free(hba, seg);
512 	return (NULL);
513 
514 } /* emlxs_mem_pool_alloc() */
515 
516 
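/*
 * emlxs_mem_pool_free
 *
 * Releases all memory owned by the segment.  A local copy of the
 * segment is taken and the original is zeroed under the pool locks
 * (marking it destroyed); the buffers are then freed from the local
 * copy outside the locks.
 */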
517 extern void
518 emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg)
519 {
520 	emlxs_port_t *port = &PPORT;
521 	uint8_t *bp = NULL;
522 	MATCHMAP *mp = NULL;
523 	MBUF_INFO *buf_info;
524 	MBUF_INFO local_buf_info;
525 	MEMSEG segment;
526 	uint32_t free;
527 
	/* Copy the segment and zero the original under the locks, */
	/* then free the underlying memory outside the locks */
530 	mutex_enter(&EMLXS_MEMGET_LOCK);
531 	mutex_enter(&EMLXS_MEMPUT_LOCK);
532 
533 	free = seg->fc_memget_cnt + seg->fc_memput_cnt;
534 	if (free < seg->fc_numblks) {
535 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
536 		    "emlxs_mem_pool_free: %s not full. (%d < %d)",
537 		    seg->fc_label, free, seg->fc_numblks);
538 	}
539 
540 	bcopy(seg, &segment, sizeof (MEMSEG));
541 	bzero((char *)seg, sizeof (MEMSEG));
542 	seg = &segment;
543 
544 	mutex_exit(&EMLXS_MEMPUT_LOCK);
545 	mutex_exit(&EMLXS_MEMGET_LOCK);
546 
547 	/* Now free the memory  */
548 
549 	if (!(seg->fc_memflag & FC_MBUF_DMA)) {
550 		if (seg->fc_memstart_virt) {
551 			kmem_free(seg->fc_memstart_virt, seg->fc_total_memsize);
552 		}
553 
554 		return;
555 	}
556 
557 	buf_info = &local_buf_info;
558 
559 	/* Free memory associated with all buffers on get buffer pool */
560 	while ((bp = seg->fc_memget_ptr) != NULL) {
561 		seg->fc_memget_ptr = *((uint8_t **)bp);
562 		mp = (MATCHMAP *)bp;
563 
564 		bzero(buf_info, sizeof (MBUF_INFO));
565 		buf_info->size = mp->size;
566 		buf_info->virt = mp->virt;
567 		buf_info->phys = mp->phys;
568 		buf_info->dma_handle = mp->dma_handle;
569 		buf_info->data_handle = mp->data_handle;
570 		buf_info->flags = seg->fc_memflag;
571 		emlxs_mem_free(hba, buf_info);
572 
573 		bzero(buf_info, sizeof (MBUF_INFO));
574 		buf_info->size = sizeof (MATCHMAP);
575 		buf_info->virt = (uint32_t *)mp;
576 		emlxs_mem_free(hba, buf_info);
577 	}
578 
579 	/* Free memory associated with all buffers on put buffer pool */
580 	while ((bp = seg->fc_memput_ptr) != NULL) {
581 		seg->fc_memput_ptr = *((uint8_t **)bp);
582 		mp = (MATCHMAP *)bp;
583 
584 		bzero(buf_info, sizeof (MBUF_INFO));
585 		buf_info->size = mp->size;
586 		buf_info->virt = mp->virt;
587 		buf_info->phys = mp->phys;
588 		buf_info->dma_handle = mp->dma_handle;
589 		buf_info->data_handle = mp->data_handle;
590 		buf_info->flags = seg->fc_memflag;
591 		emlxs_mem_free(hba, buf_info);
592 
593 		bzero(buf_info, sizeof (MBUF_INFO));
594 		buf_info->size = sizeof (MATCHMAP);
595 		buf_info->virt = (uint32_t *)mp;
596 		emlxs_mem_free(hba, buf_info);
597 	}
598 
599 	return;
600 
601 } /* emlxs_mem_pool_free() */
602 
603 
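/*
 * emlxs_mem_pool_get
 *
 * Removes one object from the head of the segment's "get" freelist.
 * If the get list is empty, the "put" freelist (where freed objects
 * accumulate under a separate lock) is moved over and the allocation
 * is retried.  A priority of 0 honors the segment's reserved count;
 * a non-zero priority may dip into the reserved objects.
 *
 * Returns the object, or NULL if the pool is destroyed, empty, or at
 * its reserve limit.
 */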
604 extern uint8_t *
605 emlxs_mem_pool_get(emlxs_hba_t *hba, MEMSEG *seg, uint32_t priority)
606 {
607 	emlxs_port_t *port = &PPORT;
608 	uint8_t *bp = NULL;
609 	MATCHMAP *mp;
610 	uint32_t free;
611 
612 	mutex_enter(&EMLXS_MEMGET_LOCK);
613 
614 	/* Check if memory segment destroyed! */
615 	if (seg->fc_total_memsize == 0) {
616 		mutex_exit(&EMLXS_MEMGET_LOCK);
617 		return (NULL);
618 	}
619 
620 	/* Check priority and reserved status */
621 	if ((priority == 0) && seg->fc_reserved) {
622 		free = seg->fc_memget_cnt + seg->fc_memput_cnt;
623 		if (free <= seg->fc_reserved) {
624 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_alloc_failed_msg,
625 			    "%s low. (%d <= %d)", seg->fc_label,
626 			    free, seg->fc_reserved);
627 
628 			mutex_exit(&EMLXS_MEMGET_LOCK);
629 			return (NULL);
630 		}
631 	}
632 
633 top:
634 
635 	if (seg->fc_memget_ptr) {
636 
637 		bp = seg->fc_memget_ptr;
638 
639 		/* Remove buffer from freelist */
640 		if (seg->fc_memget_end == bp) {
641 			seg->fc_memget_ptr = NULL;
642 			seg->fc_memget_end = NULL;
643 			seg->fc_memget_cnt = 0;
644 
645 		} else {
646 			seg->fc_memget_ptr = *((uint8_t **)bp);
647 			seg->fc_memget_cnt--;
648 		}
649 
650 		if (!(seg->fc_memflag & FC_MBUF_DMA)) {
651 			bzero(bp, seg->fc_memsize);
652 		} else {
653 			mp = (MATCHMAP *)bp;
654 			mp->fc_mptr = NULL;
655 			mp->flag |= MAP_POOL_ALLOCATED;
656 		}
657 
658 	} else {
659 		mutex_enter(&EMLXS_MEMPUT_LOCK);
660 		if (seg->fc_memput_ptr) {
661 			/*
662 			 * Move list from memput to memget
663 			 */
664 			seg->fc_memget_ptr = seg->fc_memput_ptr;
665 			seg->fc_memget_end = seg->fc_memput_end;
666 			seg->fc_memget_cnt = seg->fc_memput_cnt;
667 			seg->fc_memput_ptr = NULL;
668 			seg->fc_memput_end = NULL;
669 			seg->fc_memput_cnt = 0;
670 			mutex_exit(&EMLXS_MEMPUT_LOCK);
671 
672 			goto top;
673 		}
674 		mutex_exit(&EMLXS_MEMPUT_LOCK);
675 
676 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_alloc_failed_msg,
677 		    "%s empty.", seg->fc_label);
678 	}
679 
680 	mutex_exit(&EMLXS_MEMGET_LOCK);
681 
682 	return (bp);
683 
684 } /* emlxs_mem_pool_get() */
685 
686 
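/*
 * emlxs_mem_pool_put
 *
 * Returns an object to the tail of the segment's "put" freelist.
 * An object that was just freed is rejected; an object that does not
 * belong to this pool is logged, the adapter is placed in the
 * FC_ERROR state, and a shutdown thread is spawned.
 *
 * Returns seg on success or NULL on failure.
 */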
687 extern MEMSEG *
688 emlxs_mem_pool_put(emlxs_hba_t *hba, MEMSEG *seg, uint8_t *bp)
689 {
690 	emlxs_port_t *port = &PPORT;
691 	MATCHMAP *mp;
692 	uint8_t *base;
693 	uint8_t *end;
694 
695 	/* Free the pool object */
696 	mutex_enter(&EMLXS_MEMPUT_LOCK);
697 
698 	/* Check if memory segment destroyed! */
699 	if (seg->fc_total_memsize == 0) {
700 		mutex_exit(&EMLXS_MEMPUT_LOCK);
701 		return (NULL);
702 	}
703 
704 	/* Check if buffer was just freed */
705 	if (seg->fc_memput_ptr == bp) {
706 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
707 		    "%s: Freeing free object: bp=%p", seg->fc_label, bp);
708 
709 		mutex_exit(&EMLXS_MEMPUT_LOCK);
710 		return (NULL);
711 	}
712 
713 	/* Validate the buffer belongs to this pool */
714 	if (seg->fc_memflag & FC_MBUF_DMA) {
715 		mp = (MATCHMAP *)bp;
716 
717 		if (!(mp->flag & MAP_POOL_ALLOCATED) ||
718 		    (mp->segment != seg)) {
719 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
720 			    "emlxs_mem_pool_put: %s invalid: mp=%p " \
721 			    "tag=0x%x flag=%x", seg->fc_label,
722 			    mp, mp->tag, mp->flag);
723 
724 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
725 
726 			mutex_exit(&EMLXS_MEMPUT_LOCK);
727 
728 			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
729 			    NULL, NULL);
730 
731 			return (NULL);
732 		}
733 
734 	} else { /* Vmem_pool */
735 		base = seg->fc_memstart_virt;
736 		end = seg->fc_memstart_virt + seg->fc_total_memsize;
737 
738 		if (bp < base || bp >= end) {
739 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
740 			    "emlxs_mem_pool_put: %s Invalid: bp=%p base=%p " \
741 			    "end=%p", seg->fc_label,
742 			    bp, base, end);
743 
744 			EMLXS_STATE_CHANGE(hba, FC_ERROR);
745 
746 			mutex_exit(&EMLXS_MEMPUT_LOCK);
747 
748 			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
749 			    NULL, NULL);
750 
751 			return (NULL);
752 		}
753 	}
754 
755 	/* Release buffer to the end of the freelist */
756 	if (seg->fc_memput_end == NULL) {
757 		seg->fc_memput_ptr = bp;
758 		seg->fc_memput_cnt = 1;
759 	} else {
760 		*((uint8_t **)(seg->fc_memput_end)) = bp;
761 		seg->fc_memput_cnt++;
762 	}
763 	seg->fc_memput_end = bp;
764 	*((uint8_t **)(bp)) = NULL;
765 
766 	mutex_exit(&EMLXS_MEMPUT_LOCK);
767 
768 	return (seg);
769 
770 } /* emlxs_mem_pool_put() */
771 
772 
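/*
 * emlxs_mem_buf_alloc
 *
 * Allocates a stand-alone DMA buffer of the requested size together
 * with a MATCHMAP descriptor to track it.  The buffer is not part of
 * any memory pool; it is flagged MAP_BUF_ALLOCATED and is expected
 * to be released with emlxs_mem_buf_free() (or emlxs_mem_put(),
 * which routes such buffers there).
 *
 * Illustrative usage (sketch only; error handling abbreviated):
 *
 *	MATCHMAP *mp;
 *
 *	if ((mp = emlxs_mem_buf_alloc(hba, MEM_BUF_SIZE)) == NULL) {
 *		return;
 *	}
 *	... use mp->virt and mp->phys ...
 *	(void) emlxs_mem_buf_free(hba, mp);
 */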
773 extern MATCHMAP *
774 emlxs_mem_buf_alloc(emlxs_hba_t *hba, uint32_t size)
775 {
776 	emlxs_port_t *port = &PPORT;
777 	uint8_t *bp = NULL;
778 	MATCHMAP *mp = NULL;
779 	MBUF_INFO *buf_info;
780 	MBUF_INFO bufinfo;
781 
782 	buf_info = &bufinfo;
783 
784 	bzero(buf_info, sizeof (MBUF_INFO));
785 	buf_info->size = sizeof (MATCHMAP);
786 	buf_info->align = sizeof (void *);
787 
788 	(void) emlxs_mem_alloc(hba, buf_info);
789 	if (buf_info->virt == NULL) {
790 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
791 		    "MEM_BUF_ALLOC buffer.");
792 
793 		return (NULL);
794 	}
795 
796 	mp = (MATCHMAP *)buf_info->virt;
797 	bzero(mp, sizeof (MATCHMAP));
798 
799 	bzero(buf_info, sizeof (MBUF_INFO));
800 	buf_info->size = size;
801 	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
802 	buf_info->align = 32;
803 
804 	(void) emlxs_mem_alloc(hba, buf_info);
805 	if (buf_info->virt == NULL) {
806 
807 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
808 		    "MEM_BUF_ALLOC DMA buffer.");
809 
810 		/* Free the mp object */
811 		bzero(buf_info, sizeof (MBUF_INFO));
812 		buf_info->size = sizeof (MATCHMAP);
813 		buf_info->virt = (uint32_t *)mp;
814 		emlxs_mem_free(hba, buf_info);
815 
		return (NULL);
817 	}
818 	bp = (uint8_t *)buf_info->virt;
	bzero(bp, buf_info->size);
820 
821 	mp->virt = buf_info->virt;
822 	mp->phys = buf_info->phys;
823 	mp->size = buf_info->size;
824 	mp->dma_handle = buf_info->dma_handle;
825 	mp->data_handle = buf_info->data_handle;
826 	mp->tag = MEM_BUF;
827 	mp->flag |= MAP_BUF_ALLOCATED;
828 
829 	return (mp);
830 
831 } /* emlxs_mem_buf_alloc() */
832 
833 
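/*
 * emlxs_mem_buf_free
 *
 * Frees a DMA buffer previously obtained from emlxs_mem_buf_alloc(),
 * along with its MATCHMAP descriptor.  Buffers not flagged
 * MAP_BUF_ALLOCATED are ignored and NULL is returned.
 */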
834 extern MATCHMAP *
835 emlxs_mem_buf_free(emlxs_hba_t *hba, MATCHMAP *mp)
836 {
837 	MBUF_INFO bufinfo;
838 	MBUF_INFO *buf_info;
839 
840 	buf_info = &bufinfo;
841 
842 	if (!(mp->flag & MAP_BUF_ALLOCATED)) {
843 		return (NULL);
844 	}
845 
846 	bzero(buf_info, sizeof (MBUF_INFO));
847 	buf_info->size = mp->size;
848 	buf_info->virt = mp->virt;
849 	buf_info->phys = mp->phys;
850 	buf_info->dma_handle = mp->dma_handle;
851 	buf_info->data_handle = mp->data_handle;
852 	buf_info->flags = FC_MBUF_DMA;
853 	emlxs_mem_free(hba, buf_info);
854 
855 	bzero(buf_info, sizeof (MBUF_INFO));
856 	buf_info->size = sizeof (MATCHMAP);
857 	buf_info->virt = (uint32_t *)mp;
858 	emlxs_mem_free(hba, buf_info);
859 
860 	return (mp);
861 
862 } /* emlxs_mem_buf_free() */
863 
864 
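/*
 * emlxs_mem_get
 *
 * Allocates one object from the memory pool identified by seg_id and
 * tags it as pool allocated so that emlxs_mem_put() can validate it
 * later.
 *
 * Illustrative usage (sketch only; real callers add error handling):
 *
 *	MAILBOXQ *mbq;
 *
 *	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX, 1)) == NULL) {
 *		return;
 *	}
 *	... build and issue the mailbox command ...
 *	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
 */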
865 extern uint8_t *
866 emlxs_mem_get(emlxs_hba_t *hba, uint32_t seg_id, uint32_t priority)
867 {
868 	emlxs_port_t *port = &PPORT;
869 	uint8_t *bp;
870 	MAILBOXQ *mbq;
871 	IOCBQ *iocbq;
872 	NODELIST *node;
873 	MEMSEG *seg;
874 
875 	if (seg_id >= FC_MAX_SEG) {
876 
877 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
878 		    "emlxs_mem_get: Invalid segment id = %d",
879 		    seg_id);
880 
881 		return (NULL);
882 	}
883 	seg = &hba->memseg[seg_id];
884 
885 	/* Alloc a buffer from the pool */
886 	bp = emlxs_mem_pool_get(hba, seg, priority);
887 
888 	if (bp) {
889 		switch (seg_id) {
890 		case MEM_MBOX:
891 			mbq = (MAILBOXQ *)bp;
892 			mbq->flag |= MBQ_POOL_ALLOCATED;
893 			break;
894 
895 		case MEM_IOCB:
896 			iocbq = (IOCBQ *)bp;
897 			iocbq->flag |= IOCB_POOL_ALLOCATED;
898 			break;
899 
900 		case MEM_NLP:
901 			node = (NODELIST *)bp;
902 			node->flag |= NODE_POOL_ALLOCATED;
903 			break;
904 		}
905 	}
906 
907 	return (bp);
908 
909 } /* emlxs_mem_get() */
910 
911 
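/*
 * emlxs_mem_put
 *
 * Returns an object to the memory pool identified by seg_id after
 * verifying its pool-allocated flag.  Buffers that came from
 * emlxs_mem_buf_alloc() are routed to emlxs_mem_buf_free(), and BPL
 * table entries (MAP_TABLE_ALLOCATED) are left alone.
 */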
912 extern uint8_t *
913 emlxs_mem_put(emlxs_hba_t *hba, uint32_t seg_id, uint8_t *bp)
914 {
915 	emlxs_port_t *port = &PPORT;
916 	MAILBOXQ *mbq;
917 	IOCBQ *iocbq;
918 	NODELIST *node;
919 	MEMSEG *seg;
920 	MATCHMAP *mp;
921 
922 	if (seg_id >= FC_MAX_SEG) {
923 
924 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
925 		    "emlxs_mem_put: Invalid segment id = %d: bp=%p",
926 		    seg_id, bp);
927 
928 		return (NULL);
929 	}
930 	seg = &hba->memseg[seg_id];
931 
932 	/* Verify buffer */
933 	switch (seg_id) {
934 	case MEM_MBOX:
935 		mbq = (MAILBOXQ *)bp;
936 
937 		if (!(mbq->flag & MBQ_POOL_ALLOCATED)) {
938 			return (NULL);
939 		}
940 		break;
941 
942 	case MEM_IOCB:
943 		iocbq = (IOCBQ *)bp;
944 
945 		if (!(iocbq->flag & IOCB_POOL_ALLOCATED)) {
946 			return (NULL);
947 		}
948 
949 		/* Any IOCBQ with a packet attached did not come */
950 		/* from our pool */
951 		if (iocbq->sbp) {
952 			return (NULL);
953 		}
954 		break;
955 
956 	case MEM_NLP:
957 		node = (NODELIST *)bp;
958 
959 		if (!(node->flag & NODE_POOL_ALLOCATED)) {
960 			return (NULL);
961 		}
962 		break;
963 
964 	default:
965 		mp = (MATCHMAP *)bp;
966 
967 		if (mp->flag & MAP_BUF_ALLOCATED) {
968 			return ((uint8_t *)emlxs_mem_buf_free(hba, mp));
969 		}
970 
971 		if (mp->flag & MAP_TABLE_ALLOCATED) {
972 			return (bp);
973 		}
974 
975 		if (!(mp->flag & MAP_POOL_ALLOCATED)) {
976 			return (NULL);
977 		}
978 		break;
979 	}
980 
981 	/* Free a buffer to the pool */
982 	if (emlxs_mem_pool_put(hba, seg, bp) == NULL) {
983 		return (NULL);
984 	}
985 
986 	return (bp);
987 
988 } /* emlxs_mem_put() */
989 
990 
/*
 * Look up the buffer descriptor (virtual address) for a given mapped
 * (physical) address and unlink it from the ring's posted-buffer list.
 */
994 /* SLI3 */
995 extern MATCHMAP *
996 emlxs_mem_get_vaddr(emlxs_hba_t *hba, RING *rp, uint64_t mapbp)
997 {
998 	emlxs_port_t *port = &PPORT;
999 	MATCHMAP *prev;
1000 	MATCHMAP *mp;
1001 
1002 	if (rp->ringno == hba->channel_els) {
1003 		mp = (MATCHMAP *)rp->fc_mpoff;
1004 		prev = 0;
1005 
1006 		while (mp) {
1007 			if (mp->phys == mapbp) {
1008 				if (prev == 0) {
1009 					rp->fc_mpoff = mp->fc_mptr;
1010 				} else {
1011 					prev->fc_mptr = mp->fc_mptr;
1012 				}
1013 
1014 				if (rp->fc_mpon == (uint8_t *)mp) {
1015 					rp->fc_mpon = (uint8_t *)prev;
1016 				}
1017 
1018 				mp->fc_mptr = 0;
1019 
1020 				EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1021 				    DDI_DMA_SYNC_FORKERNEL);
1022 
1023 				HBASTATS.ElsUbPosted--;
1024 
1025 				return (mp);
1026 			}
1027 
1028 			prev = mp;
1029 			mp = (MATCHMAP *)mp->fc_mptr;
1030 		}
1031 
1032 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1033 		    "ELS Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1034 		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1035 
1036 	} else if (rp->ringno == hba->channel_ct) {
1037 
1038 		mp = (MATCHMAP *)rp->fc_mpoff;
1039 		prev = 0;
1040 
1041 		while (mp) {
1042 			if (mp->phys == mapbp) {
1043 				if (prev == 0) {
1044 					rp->fc_mpoff = mp->fc_mptr;
1045 				} else {
1046 					prev->fc_mptr = mp->fc_mptr;
1047 				}
1048 
1049 				if (rp->fc_mpon == (uint8_t *)mp) {
1050 					rp->fc_mpon = (uint8_t *)prev;
1051 				}
1052 
1053 				mp->fc_mptr = 0;
1054 
1055 				EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1056 				    DDI_DMA_SYNC_FORKERNEL);
1057 
1058 				HBASTATS.CtUbPosted--;
1059 
1060 				return (mp);
1061 			}
1062 
1063 			prev = mp;
1064 			mp = (MATCHMAP *)mp->fc_mptr;
1065 		}
1066 
1067 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1068 		    "CT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1069 		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1070 
1071 	} else if (rp->ringno == hba->channel_ip) {
1072 
1073 		mp = (MATCHMAP *)rp->fc_mpoff;
1074 		prev = 0;
1075 
1076 		while (mp) {
1077 			if (mp->phys == mapbp) {
1078 				if (prev == 0) {
1079 					rp->fc_mpoff = mp->fc_mptr;
1080 				} else {
1081 					prev->fc_mptr = mp->fc_mptr;
1082 				}
1083 
1084 				if (rp->fc_mpon == (uint8_t *)mp) {
1085 					rp->fc_mpon = (uint8_t *)prev;
1086 				}
1087 
1088 				mp->fc_mptr = 0;
1089 
1090 				EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1091 				    DDI_DMA_SYNC_FORKERNEL);
1092 
1093 				HBASTATS.IpUbPosted--;
1094 
1095 				return (mp);
1096 			}
1097 
1098 			prev = mp;
1099 			mp = (MATCHMAP *)mp->fc_mptr;
1100 		}
1101 
1102 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1103 		    "IP Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1104 		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1105 
1106 #ifdef SFCT_SUPPORT
1107 	} else if (rp->ringno == hba->CHANNEL_FCT) {
1108 		mp = (MATCHMAP *)rp->fc_mpoff;
1109 		prev = 0;
1110 
1111 		while (mp) {
1112 			if (mp->phys == mapbp) {
1113 				if (prev == 0) {
1114 					rp->fc_mpoff = mp->fc_mptr;
1115 				} else {
1116 					prev->fc_mptr = mp->fc_mptr;
1117 				}
1118 
1119 				if (rp->fc_mpon == (uint8_t *)mp) {
1120 					rp->fc_mpon = (uint8_t *)prev;
1121 				}
1122 
1123 				mp->fc_mptr = 0;
1124 
1125 				EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1126 				    DDI_DMA_SYNC_FORKERNEL);
1127 
1128 				HBASTATS.FctUbPosted--;
1129 
1130 				return (mp);
1131 			}
1132 
1133 			prev = mp;
1134 			mp = (MATCHMAP *)mp->fc_mptr;
1135 		}
1136 
1137 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1138 		    "FCT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1139 		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1140 
1141 #endif /* SFCT_SUPPORT */
1142 	}
1143 
1144 	return (0);
1145 
1146 } /* emlxs_mem_get_vaddr() */
1147 
1148 
/*
 * Given a buffer descriptor mp, return its mapped (physical) address
 * through haddr/laddr and save the descriptor on the ring's
 * posted-buffer list for later lookup.
 */
1153 /* SLI3 */
1154 extern void
1155 emlxs_mem_map_vaddr(emlxs_hba_t *hba, RING *rp, MATCHMAP *mp,
1156     uint32_t *haddr, uint32_t *laddr)
1157 {
1158 	if (rp->ringno == hba->channel_els) {
		/*
		 * Append the buffer to the ring's posted-buffer list.
		 * fc_mpoff is the head of the list;
		 * fc_mpon is the tail of the list.
		 */
1164 		mp->fc_mptr = 0;
1165 		if (rp->fc_mpoff == 0) {
1166 			rp->fc_mpoff = (uint8_t *)mp;
1167 			rp->fc_mpon = (uint8_t *)mp;
1168 		} else {
1169 			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1170 			    (uint8_t *)mp;
1171 			rp->fc_mpon = (uint8_t *)mp;
1172 		}
1173 
1174 		if (hba->flag & FC_SLIM2_MODE) {
1175 
1176 			/* return mapped address */
1177 			*haddr = PADDR_HI(mp->phys);
1178 			/* return mapped address */
1179 			*laddr = PADDR_LO(mp->phys);
1180 		} else {
1181 			/* return mapped address */
1182 			*laddr = PADDR_LO(mp->phys);
1183 		}
1184 
1185 		HBASTATS.ElsUbPosted++;
1186 
1187 	} else if (rp->ringno == hba->channel_ct) {
		/*
		 * Append the buffer to the ring's posted-buffer list.
		 * fc_mpoff is the head of the list;
		 * fc_mpon is the tail of the list.
		 */
1193 		mp->fc_mptr = 0;
1194 		if (rp->fc_mpoff == 0) {
1195 			rp->fc_mpoff = (uint8_t *)mp;
1196 			rp->fc_mpon = (uint8_t *)mp;
1197 		} else {
1198 			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1199 			    (uint8_t *)mp;
1200 			rp->fc_mpon = (uint8_t *)mp;
1201 		}
1202 
1203 		if (hba->flag & FC_SLIM2_MODE) {
1204 			/* return mapped address */
1205 			*haddr = PADDR_HI(mp->phys);
1206 			/* return mapped address */
1207 			*laddr = PADDR_LO(mp->phys);
1208 		} else {
1209 			/* return mapped address */
1210 			*laddr = PADDR_LO(mp->phys);
1211 		}
1212 
1213 		HBASTATS.CtUbPosted++;
1214 
1215 
1216 	} else if (rp->ringno == hba->channel_ip) {
		/*
		 * Append the buffer to the ring's posted-buffer list.
		 * fc_mpoff is the head of the list;
		 * fc_mpon is the tail of the list.
		 */
1222 		mp->fc_mptr = 0;
1223 		if (rp->fc_mpoff == 0) {
1224 			rp->fc_mpoff = (uint8_t *)mp;
1225 			rp->fc_mpon = (uint8_t *)mp;
1226 		} else {
1227 			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1228 			    (uint8_t *)mp;
1229 			rp->fc_mpon = (uint8_t *)mp;
1230 		}
1231 
1232 		if (hba->flag & FC_SLIM2_MODE) {
1233 			/* return mapped address */
1234 			*haddr = PADDR_HI(mp->phys);
1235 			*laddr = PADDR_LO(mp->phys);
1236 		} else {
1237 			*laddr = PADDR_LO(mp->phys);
1238 		}
1239 
1240 		HBASTATS.IpUbPosted++;
1241 
1242 
1243 #ifdef SFCT_SUPPORT
1244 	} else if (rp->ringno == hba->CHANNEL_FCT) {
		/*
		 * Append the buffer to the ring's posted-buffer list.
		 * fc_mpoff is the head of the list;
		 * fc_mpon is the tail of the list.
		 */
1250 		mp->fc_mptr = 0;
1251 		if (rp->fc_mpoff == 0) {
1252 			rp->fc_mpoff = (uint8_t *)mp;
1253 			rp->fc_mpon = (uint8_t *)mp;
1254 		} else {
1255 			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1256 			    (uint8_t *)mp;
1257 			rp->fc_mpon = (uint8_t *)mp;
1258 		}
1259 
1260 		if (hba->flag & FC_SLIM2_MODE) {
1261 			/* return mapped address */
1262 			*haddr = PADDR_HI(mp->phys);
1263 			/* return mapped address */
1264 			*laddr = PADDR_LO(mp->phys);
1265 		} else {
1266 			/* return mapped address */
1267 			*laddr = PADDR_LO(mp->phys);
1268 		}
1269 
1270 		HBASTATS.FctUbPosted++;
1271 
1272 #endif /* SFCT_SUPPORT */
1273 	}
1274 } /* emlxs_mem_map_vaddr() */
1275 
1276 
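/*
 * emlxs_hbq_alloc
 *
 * Allocates the host-side entry array for the given host buffer
 * queue (HBQ), if not already allocated.  Returns 0 on success or
 * ENOMEM on failure.
 */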
1277 /* SLI3 */
1278 uint32_t
1279 emlxs_hbq_alloc(emlxs_hba_t *hba, uint32_t hbq_id)
1280 {
1281 	emlxs_port_t *port = &PPORT;
1282 	HBQ_INIT_t *hbq;
1283 	MBUF_INFO *buf_info;
1284 	MBUF_INFO bufinfo;
1285 
1286 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
1287 
1288 	if (hbq->HBQ_host_buf.virt == 0) {
1289 		buf_info = &bufinfo;
1290 
		/* Allocate a 4K-aligned DMA buffer to hold the HBQ entries */
1292 		bzero(buf_info, sizeof (MBUF_INFO));
1293 		buf_info->size = hbq->HBQ_numEntries * sizeof (HBQE_t);
1294 		buf_info->flags = FC_MBUF_DMA;
1295 		buf_info->align = 4096;
1296 
1297 		(void) emlxs_mem_alloc(hba, buf_info);
1298 
1299 		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "Unable to alloc HBQ.");
1302 			return (ENOMEM);
1303 		}
1304 
1305 		hbq->HBQ_host_buf.virt = (void *)buf_info->virt;
1306 		hbq->HBQ_host_buf.phys = buf_info->phys;
1307 		hbq->HBQ_host_buf.data_handle = buf_info->data_handle;
1308 		hbq->HBQ_host_buf.dma_handle = buf_info->dma_handle;
1309 		hbq->HBQ_host_buf.size = buf_info->size;
1310 		hbq->HBQ_host_buf.tag = hbq_id;
1311 
1312 		bzero((char *)hbq->HBQ_host_buf.virt, buf_info->size);
1313 	}
1314 
1315 	return (0);
1316 
1317 } /* emlxs_hbq_alloc() */
1318