/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Emulex.  All rights reserved.
 * Use is subject to License terms.
 */

#include <emlxs.h>

EMLXS_MSG_DEF(EMLXS_MEM_C);

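/*
 * Pool design note (a summary, not from the original header): each MEMSEG
 * maintains two singly linked freelists, a "get" list consumed by
 * emlxs_mem_get() under EMLXS_MEMGET_LOCK and a "put" list filled by
 * emlxs_mem_put() under EMLXS_MEMPUT_LOCK.  Splitting the lists lets the
 * allocation and free paths run under separate locks; the get path only
 * migrates the put list over when it runs dry (see emlxs_mem_get below).
 */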

/*
 * emlxs_mem_alloc_buffer
 *
 * This routine allocates iocb/data buffer space and sets up the buffers
 * for all rings on the specified board to use.  The data buffers can be
 * posted to a ring with the fc_post_buffer routine.  The iocb buffers
 * are used to make a temporary copy of the response ring iocbs.
 * Returns 0 if there is not enough memory; returns 1 on success.
 */


extern int32_t
emlxs_mem_alloc_buffer(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg;
	MBUF_INFO *buf_info;
	uint8_t *bp;
	uint8_t *oldbp;
	MEMSEG *mp;
	MATCHMAP *matp;
	NODELIST *ndlp;
	IOCBQ *iocbq;
	MAILBOXQ *mbox;
	MBUF_INFO bufinfo;
	int32_t i;
	RING *fcp_rp;
	RING *ip_rp;
	RING *els_rp;
	RING *ct_rp;
	uint32_t total_iotags;
#ifdef EMLXS_SPARC
	int32_t j;
	ULP_BDE64 *p_bpl;
	ULP_BDE64 *v_bpl;
#endif	/* EMLXS_SPARC */

	buf_info = &bufinfo;
	cfg = &CFG;

	mutex_enter(&EMLXS_MEMGET_LOCK);

	/*
	 * Allocate and Initialize MEM_NLP (0)
	 */
	mp = &hba->memseg[MEM_NLP];
	mp->fc_memsize = sizeof (NODELIST);
	mp->fc_numblks = (int16_t)hba->max_nodes + 2;
	mp->fc_total_memsize = mp->fc_memsize * mp->fc_numblks;
	mp->fc_memstart_virt = kmem_zalloc(mp->fc_total_memsize, KM_SLEEP);
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_memflag = 0;
	mp->fc_lowmem = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;

	if (mp->fc_memstart_virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "NLP memory pool.");

		return (0);
	}
	bzero(mp->fc_memstart_virt, mp->fc_memsize);
	ndlp = (NODELIST *)mp->fc_memstart_virt;

	/*
	 * Link buffer into beginning of list. The first pointer
	 * in each buffer is a forward pointer to the next buffer.
	 */
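	/*
	 * After the loop the freelist looks like this (the link pointer
	 * is stored in the first word of each free block):
	 *
	 *   fc_memget_ptr -> blk[N-1] -> ... -> blk[1] -> blk[0] -> NULL
	 *                                                 (fc_memget_end)
	 */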
	for (i = 0; i < mp->fc_numblks; i++, ndlp++) {
		ndlp->flag |= NODE_POOL_ALLOCATED;

		oldbp = mp->fc_memget_ptr;
		bp = (uint8_t *)ndlp;
		if (oldbp == NULL) {
			mp->fc_memget_end = bp;
		}
		mp->fc_memget_ptr = bp;
		*((uint8_t **)bp) = oldbp;
	}


	/*
	 * Allocate and Initialize MEM_IOCB (1)
	 */
	mp = &hba->memseg[MEM_IOCB];
	mp->fc_memsize = sizeof (IOCBQ);
	mp->fc_numblks = (uint16_t)cfg[CFG_NUM_IOCBS].current;
	mp->fc_total_memsize = mp->fc_memsize * mp->fc_numblks;
	mp->fc_memstart_virt = kmem_zalloc(mp->fc_total_memsize, KM_SLEEP);
	mp->fc_lowmem = (mp->fc_numblks >> 4);
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;
	mp->fc_memflag = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;

	if (mp->fc_memstart_virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "IOCB memory pool.");

		return (0);
	}
	bzero(mp->fc_memstart_virt, mp->fc_memsize);
	iocbq = (IOCBQ *)mp->fc_memstart_virt;

	/*
	 * Link buffer into beginning of list. The first pointer
	 * in each buffer is a forward pointer to the next buffer.
	 */
	for (i = 0; i < mp->fc_numblks; i++, iocbq++) {
		iocbq->flag |= IOCB_POOL_ALLOCATED;

		oldbp = mp->fc_memget_ptr;
		bp = (uint8_t *)iocbq;
		if (oldbp == NULL) {
			mp->fc_memget_end = bp;
		}
		mp->fc_memget_ptr = bp;
		*((uint8_t **)bp) = oldbp;
	}

	/*
	 * Allocate and Initialize MEM_MBOX (2)
	 */
	mp = &hba->memseg[MEM_MBOX];
	mp->fc_memsize = sizeof (MAILBOXQ);
	mp->fc_numblks = (int16_t)hba->max_nodes + 32;
	mp->fc_total_memsize = mp->fc_memsize * mp->fc_numblks;
	mp->fc_memstart_virt = kmem_zalloc(mp->fc_total_memsize, KM_SLEEP);
	mp->fc_lowmem = (mp->fc_numblks >> 3);
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;
	mp->fc_memflag = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;

	if (mp->fc_memstart_virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "MBOX memory pool.");

		return (0);
	}
	bzero(mp->fc_memstart_virt, mp->fc_memsize);
	mbox = (MAILBOXQ *)mp->fc_memstart_virt;

	/*
	 * Link buffer into beginning of list. The first pointer
	 * in each buffer is a forward pointer to the next buffer.
	 */
	for (i = 0; i < mp->fc_numblks; i++, mbox++) {
		mbox->flag |= MBQ_POOL_ALLOCATED;

		oldbp = mp->fc_memget_ptr;
		bp = (uint8_t *)mbox;
		if (oldbp == NULL) {
			mp->fc_memget_end = bp;
		}
		mp->fc_memget_ptr = bp;
		*((uint8_t **)bp) = oldbp;
	}

	/*
	 * Initialize fc_table
	 */
	fcp_rp = &hba->ring[FC_FCP_RING];
	ip_rp = &hba->ring[FC_IP_RING];
	els_rp = &hba->ring[FC_ELS_RING];
	ct_rp = &hba->ring[FC_CT_RING];

	fcp_rp->max_iotag = cfg[CFG_NUM_IOTAGS].current;
	ip_rp->max_iotag = hba->max_nodes;
	els_rp->max_iotag = hba->max_nodes;
	ct_rp->max_iotag = hba->max_nodes;

	/* Allocate the fc_table */
	total_iotags = fcp_rp->max_iotag + ip_rp->max_iotag +
	    els_rp->max_iotag + ct_rp->max_iotag;

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = total_iotags * sizeof (emlxs_buf_t *);
	buf_info->align = sizeof (void *);

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "fc_table buffer.");

		return (0);
	}
	hba->iotag_table = buf_info->virt;
	fcp_rp->fc_table = &hba->iotag_table[0];
	ip_rp->fc_table = &hba->iotag_table[fcp_rp->max_iotag];
	els_rp->fc_table =
	    &hba->iotag_table[fcp_rp->max_iotag + ip_rp->max_iotag];
	ct_rp->fc_table =
	    &hba->iotag_table[fcp_rp->max_iotag + ip_rp->max_iotag +
	    els_rp->max_iotag];
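	/*
	 * Resulting layout of the shared iotag_table (one emlxs_buf_t
	 * pointer per iotag, carved into consecutive per-ring windows):
	 *
	 *   index:  0 .. fcp-1 | fcp .. +ip-1 | .. +els-1 | .. +ct-1
	 *   owner:  FCP ring   | IP ring      | ELS ring  | CT ring
	 */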


#ifdef EMLXS_SPARC
	/*
	 * Allocate and initialize FCP MEM_BPLs.
	 * This is for increased performance on SPARC.
	 */

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = fcp_rp->max_iotag * sizeof (MATCHMAP);
	buf_info->align = sizeof (void *);

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "FCP BPL table buffer.");

		return (0);
	}
	hba->fcp_bpl_table = buf_info->virt;
	bzero(hba->fcp_bpl_table, buf_info->size);

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = (fcp_rp->max_iotag * (3 * sizeof (ULP_BDE64)));
	buf_info->flags = FC_MBUF_DMA;
	buf_info->align = 32;

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "FCP BPL DMA buffers.");

		return (0);
	}
	bzero(buf_info->virt, buf_info->size);

	hba->fcp_bpl_mp.size = buf_info->size;
	hba->fcp_bpl_mp.virt = buf_info->virt;
	hba->fcp_bpl_mp.phys = buf_info->phys;
	hba->fcp_bpl_mp.data_handle = buf_info->data_handle;
	hba->fcp_bpl_mp.dma_handle = buf_info->dma_handle;
	hba->fcp_bpl_mp.tag = NULL;

	v_bpl = (ULP_BDE64 *)hba->fcp_bpl_mp.virt;
	p_bpl = (ULP_BDE64 *)hba->fcp_bpl_mp.phys;
	for (i = 0, j = 0; i < fcp_rp->max_iotag; i++, j += 3) {
		matp = &hba->fcp_bpl_table[i];

		matp->fc_mptr = NULL;
		matp->size = (3 * sizeof (ULP_BDE64));
		matp->virt = (uint8_t *)&v_bpl[j];
		matp->phys = (uint64_t)&p_bpl[j];
		matp->dma_handle = NULL;
		matp->data_handle = NULL;
		matp->tag = MEM_BPL;
		matp->flag |= MAP_TABLE_ALLOCATED;
	}
#endif /* EMLXS_SPARC */

	/*
	 * Allocate and Initialize MEM_BPL (3)
	 */

	mp = &hba->memseg[MEM_BPL];
	mp->fc_memsize = hba->mem_bpl_size;	/* Set during attach */
	mp->fc_numblks = (uint16_t)cfg[CFG_NUM_IOCBS].current;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = (mp->fc_numblks >> 4);
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer, we need page alignment so
		 * we don't have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "BPL segment buffer.");

			return (0);
		}

		matp = (MATCHMAP *)buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "BPL DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer
		 * in each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}

		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_BPL;
		matp->flag |= MAP_POOL_ALLOCATED;
	}
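
	/*
	 * Note: each pool entry above is a small kmem-backed MATCHMAP
	 * header recording the virt/phys addresses and DMA handles of its
	 * DMA buffer; emlxs_mem_free_buffer() later walks these same pairs
	 * back through emlxs_mem_free().  The MEM_BUF, MEM_IPBUF,
	 * MEM_CTBUF and MEM_FCTBUF pools below are built the same way.
	 */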


	/*
	 * These represent the unsolicited ELS buffers we preallocate.
	 */

	mp = &hba->memseg[MEM_BUF];
	mp->fc_memsize = MEM_BUF_SIZE;
	mp->fc_numblks = MEM_ELSBUF_COUNT + MEM_BUF_COUNT;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = 3;
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer, we need page alignment so
		 * we don't have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "MEM_BUF Segment buffer.");

			return (0);
		}

		matp = (MATCHMAP *)buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "MEM_BUF DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer
		 * in each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}

		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_BUF;
		matp->flag |= MAP_POOL_ALLOCATED;
	}


	/*
	 * These represent the unsolicited IP buffers we preallocate.
	 */

	mp = &hba->memseg[MEM_IPBUF];
	mp->fc_memsize = MEM_IPBUF_SIZE;
	mp->fc_numblks = MEM_IPBUF_COUNT;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = 3;
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer, we need page alignment so
		 * we don't have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "IP_BUF Segment buffer.");

			return (0);
		}

		matp = (MATCHMAP *)buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "IP_BUF DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer
		 * in each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}

		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_IPBUF;
		matp->flag |= MAP_POOL_ALLOCATED;
	}

	/*
	 * These represent the unsolicited CT buffers we preallocate.
	 */
	mp = &hba->memseg[MEM_CTBUF];
	mp->fc_memsize = MEM_CTBUF_SIZE;
	mp->fc_numblks = MEM_CTBUF_COUNT;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = 0;
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer, we need page alignment so
		 * we don't have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "CT_BUF Segment buffer.");

			return (0);
		}

		matp = (MATCHMAP *)buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "CT_BUF DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer
		 * in each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}

		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_CTBUF;
		matp->flag |= MAP_POOL_ALLOCATED;
	}

#ifdef SFCT_SUPPORT

	/*
	 * These represent the unsolicited FCT buffers we preallocate.
	 */
	mp = &hba->memseg[MEM_FCTBUF];
	mp->fc_memsize = MEM_FCTBUF_SIZE;
	mp->fc_numblks = (hba->tgt_mode) ? MEM_FCTBUF_COUNT : 0;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = 0;
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer, we need page alignment so
		 * we don't have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "FCT_BUF Segment buffer.");

			return (0);
		}

		matp = (MATCHMAP *)buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "FCT_BUF DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer
		 * in each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}

		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_FCTBUF;
		matp->flag |= MAP_POOL_ALLOCATED;
	}
#endif /* SFCT_SUPPORT */

	for (i = 0; i < FC_MAX_SEG; i++) {
		char *seg;

		switch (i) {
		case MEM_NLP:
			seg = "MEM_NLP";
			break;
		case MEM_IOCB:
			seg = "MEM_IOCB";
			break;
		case MEM_MBOX:
			seg = "MEM_MBOX";
			break;
		case MEM_BPL:
			seg = "MEM_BPL";
			break;
		case MEM_BUF:
			seg = "MEM_BUF";
			break;
		case MEM_IPBUF:
			seg = "MEM_IPBUF";
			break;
		case MEM_CTBUF:
			seg = "MEM_CTBUF";
			break;
#ifdef SFCT_SUPPORT
		case MEM_FCTBUF:
			seg = "MEM_FCTBUF";
			break;
#endif /* SFCT_SUPPORT */
		default:
			break;
		}

		mp = &hba->memseg[i];

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
		    "Segment: %s mp=%p size=%x count=%d flags=%x base=%p",
		    seg, mp, mp->fc_memsize, mp->fc_numblks, mp->fc_memflag,
		    mp->fc_memget_ptr);
	}

	mutex_exit(&EMLXS_MEMGET_LOCK);

	return (1);

}  /* emlxs_mem_alloc_buffer() */
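
/*
 * Illustrative caller sketch (not part of the original driver): a
 * consumer typically pairs emlxs_mem_get() with emlxs_mem_put() on the
 * same segment id and checks for pool exhaustion, e.g.:
 *
 *	IOCBQ *iocbq;
 *
 *	iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB);
 *	if (iocbq == NULL) {
 *		return (1);		hypothetical out-of-resource path
 *	}
 *	... build and issue the iocb, then release it ...
 *	(void) emlxs_mem_put(hba, MEM_IOCB, (uint8_t *)iocbq);
 */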



/*
 * emlxs_mem_free_buffer
 *
 * This routine frees iocb/data buffer space and TGTM resources.
 */
extern int
emlxs_mem_free_buffer(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	int32_t j;
	uint8_t *bp;
	MEMSEG *mp;
	MATCHMAP *mm;
	RING *rp;
	IOCBQ *iocbq;
	IOCB *iocb;
	MAILBOXQ *mbox, *mbsave;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;
	emlxs_buf_t *sbp;
	fc_unsol_buf_t *ubp;
	RING *fcp_rp;
	RING *ip_rp;
	RING *els_rp;
	RING *ct_rp;
	uint32_t total_iotags;
	emlxs_ub_priv_t *ub_priv;

	buf_info = &bufinfo;

	/* Check for deferred pkt completion */
	if (hba->mbox_sbp) {
		sbp = (emlxs_buf_t *)hba->mbox_sbp;
		hba->mbox_sbp = 0;

		emlxs_pkt_complete(sbp, -1, 0, 1);
	}

	/* Check for deferred ub completion */
	if (hba->mbox_ubp) {
		ubp = (fc_unsol_buf_t *)hba->mbox_ubp;
		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
		port = ub_priv->port;
		hba->mbox_ubp = 0;

		emlxs_ub_callback(port, ubp);
	}

#ifdef NPIV_SUPPORT
	/* Special handling for vport PLOGI */
	if (hba->mbox_iocbq == (uint8_t *)1) {
		hba->mbox_iocbq = NULL;
	}
#endif /* NPIV_SUPPORT */

	/* Check for deferred iocb tx */
	if (hba->mbox_iocbq) {	/* iocb */
		iocbq = (IOCBQ *)hba->mbox_iocbq;
		hba->mbox_iocbq = 0;
		iocb = &iocbq->iocb;

		/* Set the error status of the iocb */
		iocb->ulpStatus = IOSTAT_LOCAL_REJECT;
		iocb->un.grsp.perr.statLocalError = IOERR_ABORT_REQUESTED;

		switch (iocb->ulpCommand) {
		case CMD_FCP_ICMND_CR:
		case CMD_FCP_ICMND_CX:
		case CMD_FCP_IREAD_CR:
		case CMD_FCP_IREAD_CX:
		case CMD_FCP_IWRITE_CR:
		case CMD_FCP_IWRITE_CX:
		case CMD_FCP_ICMND64_CR:
		case CMD_FCP_ICMND64_CX:
		case CMD_FCP_IREAD64_CR:
		case CMD_FCP_IREAD64_CX:
		case CMD_FCP_IWRITE64_CR:
		case CMD_FCP_IWRITE64_CX:
			rp = &hba->ring[FC_FCP_RING];
			emlxs_handle_fcp_event(hba, rp, iocbq);
			break;

		case CMD_ELS_REQUEST_CR:
		case CMD_ELS_REQUEST_CX:
		case CMD_XMIT_ELS_RSP_CX:
		case CMD_ELS_REQUEST64_CR:	/* This is the only one used */
						/* for deferred iocb tx */
		case CMD_ELS_REQUEST64_CX:
		case CMD_XMIT_ELS_RSP64_CX:
			rp = &hba->ring[FC_ELS_RING];
			(void) emlxs_els_handle_event(hba, rp, iocbq);
			break;

		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			rp = &hba->ring[FC_CT_RING];
			(void) emlxs_ct_handle_event(hba, rp, iocbq);
			break;

		default:
			rp = (RING *)iocbq->ring;

			if (rp) {
				if (rp->ringno == FC_ELS_RING) {
					(void) emlxs_mem_put(hba, MEM_ELSBUF,
					    (uint8_t *)iocbq->bp);
				} else if (rp->ringno == FC_CT_RING) {
					(void) emlxs_mem_put(hba, MEM_CTBUF,
					    (uint8_t *)iocbq->bp);
				} else if (rp->ringno == FC_IP_RING) {
					(void) emlxs_mem_put(hba, MEM_IPBUF,
					    (uint8_t *)iocbq->bp);
				}
#ifdef SFCT_SUPPORT
				else if (rp->ringno == FC_FCT_RING) {
					(void) emlxs_mem_put(hba, MEM_FCTBUF,
					    (uint8_t *)iocbq->bp);
				}
#endif /* SFCT_SUPPORT */

			} else if (iocbq->bp) {
				(void) emlxs_mem_put(hba, MEM_BUF,
				    (uint8_t *)iocbq->bp);
			}

			if (!iocbq->sbp) {
				(void) emlxs_mem_put(hba, MEM_IOCB,
				    (uint8_t *)iocbq);
			}
		}
	}

	/* Free the mapped address match area for each ring */
	for (j = 0; j < hba->ring_count; j++) {
		rp = &hba->ring[j];

		/* Flush the ring */
		(void) emlxs_tx_ring_flush(hba, rp, 0);

		while (rp->fc_mpoff) {
			uint64_t addr;

			addr = 0;
			mm = (MATCHMAP *)(rp->fc_mpoff);

			if ((j == FC_ELS_RING) || (j == FC_CT_RING) ||
#ifdef SFCT_SUPPORT
			    (j == FC_FCT_RING) ||
#endif /* SFCT_SUPPORT */
			    (j == FC_IP_RING)) {
				addr = mm->phys;
			}

			if ((mm = emlxs_mem_get_vaddr(hba, rp, addr))) {
				if (j == FC_ELS_RING) {
					(void) emlxs_mem_put(hba, MEM_ELSBUF,
					    (uint8_t *)mm);
				} else if (j == FC_CT_RING) {
					(void) emlxs_mem_put(hba, MEM_CTBUF,
					    (uint8_t *)mm);
				} else if (j == FC_IP_RING) {
					(void) emlxs_mem_put(hba, MEM_IPBUF,
					    (uint8_t *)mm);
				}
#ifdef SFCT_SUPPORT
				else if (j == FC_FCT_RING) {
					(void) emlxs_mem_put(hba, MEM_FCTBUF,
					    (uint8_t *)mm);
				}
#endif /* SFCT_SUPPORT */

			}
		}
	}

#ifdef SLI3_SUPPORT
	if (hba->flag & FC_HBQ_ENABLED) {
		emlxs_hbq_free_all(hba, EMLXS_ELS_HBQ_ID);
		emlxs_hbq_free_all(hba, EMLXS_IP_HBQ_ID);
		emlxs_hbq_free_all(hba, EMLXS_CT_HBQ_ID);
#ifdef SFCT_SUPPORT
		if (hba->tgt_mode) {
			emlxs_hbq_free_all(hba, EMLXS_FCT_HBQ_ID);
		}
#endif /* SFCT_SUPPORT */

	}
#endif /* SLI3_SUPPORT */

	/* Free everything on mbox queue */
	mbox = (MAILBOXQ *)(hba->mbox_queue.q_first);
	while (mbox) {
		mbsave = mbox;
		mbox = (MAILBOXQ *)mbox->next;
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbsave);
	}
	hba->mbox_queue.q_first = NULL;
	hba->mbox_queue.q_last = NULL;
	hba->mbox_queue.q_cnt = 0;
	hba->mbox_queue_flag = 0;

	/* Free the nodes */
	for (j = 0; j < MAX_VPORTS; j++) {
		vport = &VPORT(j);
		if (vport->node_count) {
			emlxs_node_destroy_all(vport);
		}
	}

	/* Free the fc_table (iotag table) */
	if (hba->iotag_table) {
		fcp_rp = &hba->ring[FC_FCP_RING];
		ip_rp = &hba->ring[FC_IP_RING];
		els_rp = &hba->ring[FC_ELS_RING];
		ct_rp = &hba->ring[FC_CT_RING];

		total_iotags = fcp_rp->max_iotag + ip_rp->max_iotag +
		    els_rp->max_iotag + ct_rp->max_iotag;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = total_iotags * sizeof (emlxs_buf_t *);
		buf_info->virt = hba->iotag_table;
		emlxs_mem_free(hba, buf_info);

		hba->iotag_table = 0;
	}
#ifdef EMLXS_SPARC
	if (hba->fcp_bpl_table) {
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = fcp_rp->max_iotag * sizeof (MATCHMAP);
		buf_info->virt = hba->fcp_bpl_table;
		emlxs_mem_free(hba, buf_info);

		hba->fcp_bpl_table = 0;
	}

	if (hba->fcp_bpl_mp.virt) {
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = hba->fcp_bpl_mp.size;
		buf_info->virt = hba->fcp_bpl_mp.virt;
		buf_info->phys = hba->fcp_bpl_mp.phys;
		buf_info->dma_handle = hba->fcp_bpl_mp.dma_handle;
		buf_info->data_handle = hba->fcp_bpl_mp.data_handle;
		buf_info->flags = FC_MBUF_DMA;
		emlxs_mem_free(hba, buf_info);

		bzero(&hba->fcp_bpl_mp, sizeof (MATCHMAP));
	}
#endif /* EMLXS_SPARC */

	/* Free the memory segments */
	for (j = 0; j < FC_MAX_SEG; j++) {
		mp = &hba->memseg[j];

		/* MEM_NLP, MEM_IOCB, MEM_MBOX */
		if (j < MEM_BPL) {
			if (mp->fc_memstart_virt) {
				kmem_free(mp->fc_memstart_virt,
				    mp->fc_total_memsize);
				bzero((char *)mp, sizeof (MEMSEG));
			}

			continue;
		}

		/*
		 * MEM_BPL, MEM_BUF, MEM_ELSBUF,
		 * MEM_IPBUF, MEM_CTBUF, MEM_FCTBUF
		 */

		/* Free memory associated with all buffers on get buffer pool */
		mutex_enter(&EMLXS_MEMGET_LOCK);
		while ((bp = mp->fc_memget_ptr) != NULL) {
			mp->fc_memget_ptr = *((uint8_t **)bp);
			mm = (MATCHMAP *)bp;

			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = mm->size;
			buf_info->virt = mm->virt;
			buf_info->phys = mm->phys;
			buf_info->dma_handle = mm->dma_handle;
			buf_info->data_handle = mm->data_handle;
			buf_info->flags = FC_MBUF_DMA;
			emlxs_mem_free(hba, buf_info);

			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = sizeof (MATCHMAP);
			buf_info->virt = (uint32_t *)mm;
			emlxs_mem_free(hba, buf_info);
		}
		mutex_exit(&EMLXS_MEMGET_LOCK);

		/* Free memory associated with all buffers on put buffer pool */
		mutex_enter(&EMLXS_MEMPUT_LOCK);
		while ((bp = mp->fc_memput_ptr) != NULL) {
			mp->fc_memput_ptr = *((uint8_t **)bp);
			mm = (MATCHMAP *)bp;

			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = mm->size;
			buf_info->virt = mm->virt;
			buf_info->phys = mm->phys;
			buf_info->dma_handle = mm->dma_handle;
			buf_info->data_handle = mm->data_handle;
			buf_info->flags = FC_MBUF_DMA;
			emlxs_mem_free(hba, buf_info);

			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = sizeof (MATCHMAP);
			buf_info->virt = (uint32_t *)mm;
			emlxs_mem_free(hba, buf_info);
		}
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		bzero((char *)mp, sizeof (MEMSEG));
	}

	return (0);

}  /* emlxs_mem_free_buffer() */


extern uint8_t *
emlxs_mem_buf_alloc(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint8_t *bp = NULL;
	MATCHMAP *matp = NULL;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;

	buf_info = &bufinfo;

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = sizeof (MATCHMAP);
	buf_info->align = sizeof (void *);

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "MEM_BUF_ALLOC buffer.");

		return (0);
	}

	matp = (MATCHMAP *)buf_info->virt;
	bzero(matp, sizeof (MATCHMAP));

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = MEM_BUF_SIZE;
	buf_info->flags = FC_MBUF_DMA;
	buf_info->align = 32;

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "MEM_BUF_ALLOC DMA buffer.");

		/* Free the matp object */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->virt = (uint32_t *)matp;
		emlxs_mem_free(hba, buf_info);

		return (0);
	}
	bp = (uint8_t *)buf_info->virt;
	bzero(bp, MEM_BUF_SIZE);

	matp->fc_mptr = NULL;
	matp->virt = buf_info->virt;
	matp->phys = buf_info->phys;
	matp->size = buf_info->size;
	matp->dma_handle = buf_info->dma_handle;
	matp->data_handle = buf_info->data_handle;
	matp->tag = MEM_BUF;
	matp->flag |= MAP_BUF_ALLOCATED;

	return ((uint8_t *)matp);

}  /* emlxs_mem_buf_alloc() */
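
/*
 * Note: emlxs_mem_buf_alloc() builds a standalone MATCHMAP + DMA buffer
 * pair outside of any MEMSEG pool.  The MAP_BUF_ALLOCATED flag set above
 * is what later routes the buffer to emlxs_mem_buf_free() instead of
 * back onto a pool freelist (see the flag check in emlxs_mem_put).
 */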


extern uint8_t *
emlxs_mem_buf_free(emlxs_hba_t *hba, uint8_t *bp)
{
	MATCHMAP *matp;
	MBUF_INFO bufinfo;
	MBUF_INFO *buf_info;

	buf_info = &bufinfo;

	matp = (MATCHMAP *)bp;

	if (!(matp->flag & MAP_BUF_ALLOCATED)) {
		return (NULL);
	}

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = matp->size;
	buf_info->virt = matp->virt;
	buf_info->phys = matp->phys;
	buf_info->dma_handle = matp->dma_handle;
	buf_info->data_handle = matp->data_handle;
	buf_info->flags = FC_MBUF_DMA;
	emlxs_mem_free(hba, buf_info);

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = sizeof (MATCHMAP);
	buf_info->virt = (uint32_t *)matp;
	emlxs_mem_free(hba, buf_info);

	return (bp);

}  /* emlxs_mem_buf_free() */



/*
 * emlxs_mem_get
 *
 * This routine gets a free memory buffer.  seg identifies which buffer
 * pool to use.  Returns the free buffer pointer, or NULL if none is
 * available.
 */
extern uint8_t *
emlxs_mem_get(emlxs_hba_t *hba, uint32_t arg)
{
	emlxs_port_t *port = &PPORT;
	MEMSEG *mp;
	uint8_t *bp = NULL;
	uint32_t seg = arg & MEM_SEG_MASK;
	MAILBOXQ *mbq;
	MATCHMAP *matp;
	IOCBQ *iocbq;
	NODELIST *node;
	uint8_t *base;
	uint8_t *end;

	/* Range check on seg argument */
	if (seg >= FC_MAX_SEG) {
		return (NULL);
	}

	mp = &hba->memseg[seg];

	/* Check if memory segment destroyed! */
	if (mp->fc_memsize == 0) {
		return (NULL);
	}

	mutex_enter(&EMLXS_MEMGET_LOCK);

top:

	if (mp->fc_memget_ptr) {
		bp = mp->fc_memget_ptr;

		/* Checking if seg == MEM_MBOX, MEM_IOCB or MEM_NLP */
		/* Verify buffer is in this memory region */
		if (mp->fc_memstart_virt && mp->fc_total_memsize) {
			base = mp->fc_memstart_virt;
			end = mp->fc_memstart_virt + mp->fc_total_memsize;
			if (bp < base || bp >= end) {
				/* Invalidate the get list */
				mp->fc_memget_ptr = NULL;
				mp->fc_memget_end = NULL;
				mp->fc_memget_cnt = 0;

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_pool_error_msg,
				    "Corruption detected: seg=%x bp=%p "
				    "base=%p end=%p.", seg, bp, base, end);

				emlxs_ffstate_change(hba, FC_ERROR);

				mutex_exit(&EMLXS_MEMGET_LOCK);

				emlxs_thread_spawn(hba, emlxs_shutdown_thread,
				    NULL, NULL);

				return (NULL);
			}
		}

		/*
		 * If a memory block exists, take it off freelist
		 * and return it to the user.
		 */
		if (mp->fc_memget_end == bp) {
			mp->fc_memget_ptr = NULL;
			mp->fc_memget_end = NULL;
			mp->fc_memget_cnt = 0;

		} else {
			/*
			 * Pointer to the next free buffer
			 */
			mp->fc_memget_ptr = *((uint8_t **)bp);
			mp->fc_memget_cnt--;
		}

		switch (seg) {
		case MEM_MBOX:
			bzero(bp, sizeof (MAILBOXQ));

			mbq = (MAILBOXQ *)bp;
			mbq->flag |= MBQ_POOL_ALLOCATED;
			break;

		case MEM_IOCB:
			bzero(bp, sizeof (IOCBQ));

			iocbq = (IOCBQ *)bp;
			iocbq->flag |= IOCB_POOL_ALLOCATED;
			break;

		case MEM_NLP:
			bzero(bp, sizeof (NODELIST));

			node = (NODELIST *)bp;
			node->flag |= NODE_POOL_ALLOCATED;
			break;

		case MEM_BPL:
		case MEM_BUF:	/* MEM_ELSBUF */
		case MEM_IPBUF:
		case MEM_CTBUF:
#ifdef SFCT_SUPPORT
		case MEM_FCTBUF:
#endif /* SFCT_SUPPORT */
		default:
			matp = (MATCHMAP *)bp;
			matp->fc_mptr = NULL;
			matp->flag |= MAP_POOL_ALLOCATED;
			break;
		}
	} else {
		mutex_enter(&EMLXS_MEMPUT_LOCK);
		if (mp->fc_memput_ptr) {
			/*
			 * Move buffer from memput to memget
			 */
			mp->fc_memget_ptr = mp->fc_memput_ptr;
			mp->fc_memget_end = mp->fc_memput_end;
			mp->fc_memget_cnt = mp->fc_memput_cnt;
			mp->fc_memput_ptr = NULL;
			mp->fc_memput_end = NULL;
			mp->fc_memput_cnt = 0;
			mutex_exit(&EMLXS_MEMPUT_LOCK);

			goto top;
		}
		mutex_exit(&EMLXS_MEMPUT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_alloc_failed_msg,
		    "Pool empty: seg=%x lowmem=%x free=%x", seg,
		    mp->fc_lowmem, mp->fc_memget_cnt);

		/* HBASTATS.memAllocErr++; */
	}

	mutex_exit(&EMLXS_MEMGET_LOCK);

	return (bp);

}  /* emlxs_mem_get() */
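
/*
 * Design note: the put-to-get migration above means emlxs_mem_put() only
 * ever needs EMLXS_MEMPUT_LOCK, and the two freelists meet only here,
 * when the get list runs dry.  This keeps the common get and put paths
 * from contending on a single freelist lock.
 */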



extern uint8_t *
emlxs_mem_put(emlxs_hba_t *hba, uint32_t seg, uint8_t *bp)
{
	emlxs_port_t *port = &PPORT;
	MEMSEG *mp;
	uint8_t *oldbp;
	MATCHMAP *matp;
	IOCBQ *iocbq;
	MAILBOXQ *mbq;
	NODELIST *node;
	uint8_t *base;
	uint8_t *end;

	if (!bp) {
		return (NULL);
	}

	/* Check on seg argument */
	if (seg >= FC_MAX_SEG) {
		return (NULL);
	}

	mp = &hba->memseg[seg];

	switch (seg) {
	case MEM_MBOX:
		mbq = (MAILBOXQ *)bp;

		if (!(mbq->flag & MBQ_POOL_ALLOCATED)) {
			return (bp);
		}
		break;

	case MEM_IOCB:
		iocbq = (IOCBQ *)bp;

		/* Check to make sure the IOCB is pool allocated */
		if (!(iocbq->flag & IOCB_POOL_ALLOCATED)) {
			return (bp);
		}

		/* Any IOCBQ with a packet attached did not come */
		/* from our pool */
		if (iocbq->sbp) {
			return (bp);
		}
		break;

	case MEM_NLP:
		node = (NODELIST *)bp;

		/* Check to make sure the NODE is pool allocated */
		if (!(node->flag & NODE_POOL_ALLOCATED)) {
			return (bp);
		}
		break;

	case MEM_BPL:
	case MEM_BUF:	/* MEM_ELSBUF */
	case MEM_IPBUF:
	case MEM_CTBUF:
#ifdef SFCT_SUPPORT
	case MEM_FCTBUF:
#endif /* SFCT_SUPPORT */
	default:
		matp = (MATCHMAP *)bp;

		if (matp->flag & MAP_BUF_ALLOCATED) {
			return (emlxs_mem_buf_free(hba, bp));
		}

		if (matp->flag & MAP_TABLE_ALLOCATED) {
			return (bp);
		}

		/* Check to make sure the MATCHMAP is pool allocated */
		if (!(matp->flag & MAP_POOL_ALLOCATED)) {
			return (bp);
		}
		break;
	}

	/* Free the pool object */
	mutex_enter(&EMLXS_MEMPUT_LOCK);

	/* Check if memory segment destroyed! */
	if (mp->fc_memsize == 0) {
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return (NULL);
	}

	/* Check if buffer was just freed */
	if (mp->fc_memput_ptr == bp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "Freeing Free object: seg=%x bp=%p", seg, bp);

		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return (NULL);
	}

	/* Validate the buffer */

	/* Checking if seg == MEM_BUF, MEM_BPL, MEM_CTBUF, */
	/* MEM_IPBUF or MEM_FCTBUF */
	if (mp->fc_memflag & FC_MEM_DMA) {
		if (matp->tag != seg) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
			    "Corruption detected: seg=%x tag=%x bp=%p", seg,
			    matp->tag, bp);

			emlxs_ffstate_change(hba, FC_ERROR);

			mutex_exit(&EMLXS_MEMPUT_LOCK);

			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
			    NULL, NULL);

			return (NULL);
		}
	}

	/* Checking (seg == MEM_MBOX || seg == MEM_IOCB || seg == MEM_NLP) */
	else if (mp->fc_memstart_virt && mp->fc_total_memsize) {
		base = mp->fc_memstart_virt;
		end = mp->fc_memstart_virt + mp->fc_total_memsize;
		if (bp < base || bp >= end) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
			    "Corruption detected: seg=%x bp=%p base=%p end=%p",
			    seg, bp, base, end);

			emlxs_ffstate_change(hba, FC_ERROR);

			mutex_exit(&EMLXS_MEMPUT_LOCK);

			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
			    NULL, NULL);

			return (NULL);
		}
	}
	/* Insert the released buffer at the head of the freelist */
	oldbp = mp->fc_memput_ptr;
	mp->fc_memput_ptr = bp;
	*((uint8_t **)bp) = oldbp;

	if (oldbp == NULL) {
		mp->fc_memput_end = bp;
		mp->fc_memput_cnt = 1;
	} else {
		mp->fc_memput_cnt++;
	}

	mutex_exit(&EMLXS_MEMPUT_LOCK);

	return (bp);

}  /* emlxs_mem_put() */



/*
 * Look up the virtual address given a mapped address
 */
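/*
 * The posted buffers for each unsolicited ring live on a singly linked
 * MATCHMAP list headed by rp->fc_mpoff (tail rp->fc_mpon).  This routine
 * unlinks and returns the entry whose DMA address matches mapbp, syncing
 * the buffer for the kernel before handing it back.
 */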
extern MATCHMAP *
emlxs_mem_get_vaddr(emlxs_hba_t *hba, RING *rp, uint64_t mapbp)
{
	emlxs_port_t *port = &PPORT;
	MATCHMAP *prev;
	MATCHMAP *mp;

	switch (rp->ringno) {
	case FC_ELS_RING:
		mp = (MATCHMAP *)rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}

				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.ElsUbPosted--;

				return (mp);
			}

			prev = mp;
			mp = (MATCHMAP *)mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "ELS Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;

	case FC_CT_RING:
		mp = (MATCHMAP *)rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}

				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.CtUbPosted--;

				return (mp);
			}

			prev = mp;
			mp = (MATCHMAP *)mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "CT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;

	case FC_IP_RING:
		mp = (MATCHMAP *)rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}

				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.IpUbPosted--;

				return (mp);
			}

			prev = mp;
			mp = (MATCHMAP *)mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "IP Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;

#ifdef SFCT_SUPPORT
	case FC_FCT_RING:
		mp = (MATCHMAP *)rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}

				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.FctUbPosted--;

				return (mp);
			}

			prev = mp;
			mp = (MATCHMAP *)mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "FCT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;
#endif /* SFCT_SUPPORT */
	}

	return (0);

}  /* emlxs_mem_get_vaddr() */


/*
 * Given a mapped buffer mp, place its physical (mapped) address where
 * haddr/laddr point, and save the address pair for lookup later.
 */
extern void
emlxs_mem_map_vaddr(emlxs_hba_t *hba, RING *rp, MATCHMAP *mp,
    uint32_t *haddr, uint32_t *laddr)
{
	switch (rp->ringno) {
	case FC_ELS_RING:
		/*
		 * Update the slot fc_mpon points to, then bump it.
		 * fc_mpoff is the head pointer of the list;
		 * fc_mpon is the tail pointer of the list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
			    (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = putPaddrHigh(mp->phys);
			/* return mapped address */
			*laddr = putPaddrLow(mp->phys);
		} else {
			/* return mapped address */
			*laddr = putPaddrLow(mp->phys);
		}

		HBASTATS.ElsUbPosted++;

		break;

	case FC_CT_RING:
		/*
		 * Update the slot fc_mpon points to, then bump it.
		 * fc_mpoff is the head pointer of the list;
		 * fc_mpon is the tail pointer of the list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
			    (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = putPaddrHigh(mp->phys);
			/* return mapped address */
			*laddr = putPaddrLow(mp->phys);
		} else {
			/* return mapped address */
			*laddr = putPaddrLow(mp->phys);
		}

		HBASTATS.CtUbPosted++;

		break;


	case FC_IP_RING:
		/*
		 * Update the slot fc_mpon points to, then bump it.
		 * fc_mpoff is the head pointer of the list;
		 * fc_mpon is the tail pointer of the list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
			    (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = putPaddrHigh(mp->phys);
			*laddr = putPaddrLow(mp->phys);
		} else {
			*laddr = putPaddrLow(mp->phys);
		}

		HBASTATS.IpUbPosted++;
		break;


#ifdef SFCT_SUPPORT
	case FC_FCT_RING:
		/*
		 * Update the slot fc_mpon points to, then bump it.
		 * fc_mpoff is the head pointer of the list;
		 * fc_mpon is the tail pointer of the list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
			    (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = putPaddrHigh(mp->phys);
			/* return mapped address */
			*laddr = putPaddrLow(mp->phys);
		} else {
			/* return mapped address */
			*laddr = putPaddrLow(mp->phys);
		}

		HBASTATS.FctUbPosted++;
		break;
#endif /* SFCT_SUPPORT */
	}
}  /* emlxs_mem_map_vaddr() */
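
/*
 * emlxs_mem_map_vaddr() is the producer half of the lookup scheme above:
 * it appends mp to the ring's posted list and returns the 64-bit DMA
 * address (split across *haddr/*laddr in SLIM2 mode) that the adapter
 * will later hand back, which emlxs_mem_get_vaddr() uses as its search
 * key.
 */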


#ifdef SLI3_SUPPORT

uint32_t
emlxs_hbq_alloc(emlxs_hba_t *hba, uint32_t hbq_id)
{
	emlxs_port_t *port = &PPORT;
	HBQ_INIT_t *hbq;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;

	hbq = &hba->hbq_table[hbq_id];

	if (hbq->HBQ_host_buf.virt == 0) {
		buf_info = &bufinfo;

		/* Allocate a page-aligned DMA buffer for the HBQ entries */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = hbq->HBQ_numEntries * sizeof (HBQE_t);
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 4096;

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
			    "Unable to alloc HBQ.");
			return (ENOMEM);
		}

		hbq->HBQ_host_buf.virt = (void *)buf_info->virt;
		hbq->HBQ_host_buf.phys = buf_info->phys;
		hbq->HBQ_host_buf.data_handle = buf_info->data_handle;
		hbq->HBQ_host_buf.dma_handle = buf_info->dma_handle;
		hbq->HBQ_host_buf.size = buf_info->size;
		hbq->HBQ_host_buf.tag = hbq_id;

		bzero((char *)hbq->HBQ_host_buf.virt, buf_info->size);
	}

	return (0);

}  /* emlxs_hbq_alloc() */


#endif /* SLI3_SUPPORT */