/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Emulex.  All rights reserved.
 * Use is subject to License terms.
 */


#include "emlxs.h"

EMLXS_MSG_DEF(EMLXS_MEM_C);


#ifdef SLI3_SUPPORT
static uint32_t emlxs_hbq_alloc(emlxs_hba_t *hba, uint32_t hbq_id);
static void emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id);
#endif	/* SLI3_SUPPORT */

/*
 *   emlxs_mem_alloc_buffer
 *
 *   This routine allocates iocb/data buffer space
 *   and sets up the buffers for all rings on the
 *   specified board to use.  The data buffers can
 *   be posted to the ring with the fc_post_buffer
 *   routine.  The iocb buffers are used to make a
 *   temporary copy of the response ring iocbs.
 *   Returns 0 if there is not enough memory;
 *   returns 1 if successful.
 */


extern int32_t
emlxs_mem_alloc_buffer(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg;
	MBUF_INFO *buf_info;
	uint8_t *bp;
	uint8_t *oldbp;
	MEMSEG *mp;
	MATCHMAP *matp;
	NODELIST *ndlp;
	IOCBQ *iocbq;
	MAILBOXQ *mbox;
	MBUF_INFO bufinfo;
	int32_t i;
	RING *fcp_rp;
	RING *ip_rp;
	RING *els_rp;
	RING *ct_rp;
#ifdef EMLXS_SPARC
	int32_t j;
	ULP_BDE64 *v_bpl;
	ULP_BDE64 *p_bpl;
#endif	/* EMLXS_SPARC */
	uint32_t total_iotags;

	buf_info = &bufinfo;
	cfg = &CFG;

	mutex_enter(&EMLXS_MEMGET_LOCK);

	/*
	 * Allocate and Initialize MEM_NLP (0)
	 */
	mp = &hba->memseg[MEM_NLP];
	mp->fc_memsize = sizeof (NODELIST);
	mp->fc_numblks = (int16_t)hba->max_nodes + 2;
	mp->fc_total_memsize = mp->fc_memsize * mp->fc_numblks;
	mp->fc_memstart_virt = kmem_zalloc(mp->fc_total_memsize, KM_NOSLEEP);
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_memflag = 0;
	mp->fc_lowmem = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;

	if (mp->fc_memstart_virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "NLP memory pool.");

		return (0);
	}
	bzero(mp->fc_memstart_virt, mp->fc_total_memsize);
	ndlp = (NODELIST *) mp->fc_memstart_virt;

	/*
	 * Link buffer into beginning of list. The first pointer in each
	 * buffer is a forward pointer to the next buffer.
	 */
	for (i = 0; i < mp->fc_numblks; i++, ndlp++) {
		ndlp->flag |= NODE_POOL_ALLOCATED;

		oldbp = mp->fc_memget_ptr;
		bp = (uint8_t *)ndlp;
		if (oldbp == NULL) {
			mp->fc_memget_end = bp;
		}
		mp->fc_memget_ptr = bp;
		*((uint8_t **)bp) = oldbp;
	}
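
	/*
	 * Illustrative sketch (not driver code): each pool freelist is a
	 * simple LIFO list threaded through the free buffers themselves;
	 * the first pointer-sized word of a free buffer holds the address
	 * of the next free buffer.  The push done by the loop above is
	 * equivalent to:
	 *
	 *	void push(uint8_t *bp) {
	 *		if (head == NULL)
	 *			tail = bp;		// first buffer is the tail
	 *		*((uint8_t **)bp) = head;	// link to old head
	 *		head = bp;			// new head of freelist
	 *	}
	 *
	 * where 'head' stands for mp->fc_memget_ptr and 'tail' for
	 * mp->fc_memget_end.  emlxs_mem_get() later pops from 'head' the
	 * same way.
	 */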


	/*
	 * Allocate and Initialize MEM_IOCB (1)
	 */
	mp = &hba->memseg[MEM_IOCB];
	mp->fc_memsize = sizeof (IOCBQ);
	mp->fc_numblks = (uint16_t)cfg[CFG_NUM_IOCBS].current;
	mp->fc_total_memsize = mp->fc_memsize * mp->fc_numblks;
	mp->fc_memstart_virt = kmem_zalloc(mp->fc_total_memsize, KM_NOSLEEP);
	mp->fc_lowmem = (mp->fc_numblks >> 4);
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;
	mp->fc_memflag = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;

	if (mp->fc_memstart_virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "IOCB memory pool.");

		return (0);
	}
	bzero(mp->fc_memstart_virt, mp->fc_total_memsize);
	iocbq = (IOCBQ *) mp->fc_memstart_virt;

	/*
	 * Link buffer into beginning of list. The first pointer in each
	 * buffer is a forward pointer to the next buffer.
	 */
	for (i = 0; i < mp->fc_numblks; i++, iocbq++) {
		iocbq->flag |= IOCB_POOL_ALLOCATED;

		oldbp = mp->fc_memget_ptr;
		bp = (uint8_t *)iocbq;
		if (oldbp == NULL) {
			mp->fc_memget_end = bp;
		}
		mp->fc_memget_ptr = bp;
		*((uint8_t **)bp) = oldbp;
	}

	/*
	 * Allocate and Initialize MEM_MBOX (2)
	 */
	mp = &hba->memseg[MEM_MBOX];
	mp->fc_memsize = sizeof (MAILBOXQ);
	mp->fc_numblks = (int16_t)hba->max_nodes + 32;
	mp->fc_total_memsize = mp->fc_memsize * mp->fc_numblks;
	mp->fc_memstart_virt = kmem_zalloc(mp->fc_total_memsize, KM_NOSLEEP);
	mp->fc_lowmem = (mp->fc_numblks >> 3);
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;
	mp->fc_memflag = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;

	if (mp->fc_memstart_virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "MBOX memory pool.");

		return (0);
	}
	bzero(mp->fc_memstart_virt, mp->fc_total_memsize);
	mbox = (MAILBOXQ *) mp->fc_memstart_virt;

	/*
	 * Link buffer into beginning of list. The first pointer in each
	 * buffer is a forward pointer to the next buffer.
	 */
	for (i = 0; i < mp->fc_numblks; i++, mbox++) {
		mbox->flag |= MBQ_POOL_ALLOCATED;

		oldbp = mp->fc_memget_ptr;
		bp = (uint8_t *)mbox;
		if (oldbp == NULL) {
			mp->fc_memget_end = bp;
		}
		mp->fc_memget_ptr = bp;
		*((uint8_t **)bp) = oldbp;
	}

	/*
	 * Initialize fc_table
	 */
	fcp_rp = &hba->ring[FC_FCP_RING];
	ip_rp = &hba->ring[FC_IP_RING];
	els_rp = &hba->ring[FC_ELS_RING];
	ct_rp = &hba->ring[FC_CT_RING];

	fcp_rp->max_iotag = cfg[CFG_NUM_IOTAGS].current;
	ip_rp->max_iotag = hba->max_nodes;
	els_rp->max_iotag = hba->max_nodes;
	ct_rp->max_iotag = hba->max_nodes;

	/* Allocate the fc_table */
	total_iotags = fcp_rp->max_iotag + ip_rp->max_iotag +
	    els_rp->max_iotag + ct_rp->max_iotag;

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = total_iotags * sizeof (emlxs_buf_t *);
	buf_info->align = sizeof (void *);

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "fc_table buffer.");

		return (0);
	}
	hba->iotag_table = buf_info->virt;
	fcp_rp->fc_table = &hba->iotag_table[0];
	ip_rp->fc_table = &hba->iotag_table[fcp_rp->max_iotag];
	els_rp->fc_table = &hba->iotag_table[fcp_rp->max_iotag +
	    ip_rp->max_iotag];
	ct_rp->fc_table = &hba->iotag_table[fcp_rp->max_iotag +
	    ip_rp->max_iotag + els_rp->max_iotag];
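
	/*
	 * Illustrative note (layout inferred from the assignments above):
	 * the single iotag_table allocation is carved into four adjacent
	 * per-ring sub-arrays, so the sbp slot for iotag 't' on a ring is
	 * simply:
	 *
	 *	emlxs_buf_t **slot = &rp->fc_table[t];	// rp->fc_table points
	 *						// into iotag_table at
	 *						// that ring's offset
	 */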

#ifdef EMLXS_SPARC
	/*
	 * Allocate and Initialize FCP MEM_BPL's. This is for increased
	 * performance on sparc
	 */

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = fcp_rp->max_iotag * sizeof (MATCHMAP);
	buf_info->align = sizeof (void *);

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "FCP BPL table buffer.");

		return (0);
	}
	hba->fcp_bpl_table = buf_info->virt;
	bzero(hba->fcp_bpl_table, buf_info->size);

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = (fcp_rp->max_iotag * (3 * sizeof (ULP_BDE64)));
	buf_info->flags = FC_MBUF_DMA;
	buf_info->align = 32;

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {
		mutex_exit(&EMLXS_MEMGET_LOCK);

		(void) emlxs_mem_free_buffer(hba);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "FCP BPL DMA buffers.");

		return (0);
	}
	bzero(buf_info->virt, buf_info->size);

	hba->fcp_bpl_mp.size = buf_info->size;
	hba->fcp_bpl_mp.virt = buf_info->virt;
	hba->fcp_bpl_mp.phys = buf_info->phys;
	hba->fcp_bpl_mp.data_handle = buf_info->data_handle;
	hba->fcp_bpl_mp.dma_handle = buf_info->dma_handle;
	hba->fcp_bpl_mp.tag = NULL;

	v_bpl = (ULP_BDE64 *) hba->fcp_bpl_mp.virt;
	p_bpl = (ULP_BDE64 *) hba->fcp_bpl_mp.phys;
	for (i = 0, j = 0; i < fcp_rp->max_iotag; i++, j += 3) {
		matp = &hba->fcp_bpl_table[i];

		matp->fc_mptr = NULL;
		matp->size = (3 * sizeof (ULP_BDE64));
		matp->virt = (uint8_t *)& v_bpl[j];
		matp->phys = (uint64_t)& p_bpl[j];
		matp->dma_handle = NULL;
		matp->data_handle = NULL;
		matp->tag = MEM_BPL;
		matp->flag |= MAP_TABLE_ALLOCATED;
	}

#endif	/* EMLXS_SPARC */

	/*
	 * Allocate and Initialize MEM_BPL (3)
	 */

	mp = &hba->memseg[MEM_BPL];
	mp->fc_memsize = hba->mem_bpl_size;	/* Set during attach */
	mp->fc_numblks = (uint16_t)cfg[CFG_NUM_IOCBS].current;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = (mp->fc_numblks >> 4);
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer, we want page alignment so we
		 * don't have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "BPL segment buffer.");

			return (0);
		}
		matp = (MATCHMAP *) buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "BPL DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer in
		 * each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}
		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_BPL;
		matp->flag |= MAP_POOL_ALLOCATED;
	}


	/*
	 * These represent the unsolicited ELS buffers we preallocate.
	 */

	mp = &hba->memseg[MEM_BUF];
	mp->fc_memsize = MEM_BUF_SIZE;
	mp->fc_numblks = MEM_ELSBUF_COUNT + MEM_BUF_COUNT;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = 3;
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer, we want page alignment so we
		 * don't have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "MEM_BUF Segment buffer.");

			return (0);
		}
		matp = (MATCHMAP *) buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "MEM_BUF DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer in
		 * each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}
		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_BUF;
		matp->flag |= MAP_POOL_ALLOCATED;
	}


	/*
	 * These represent the unsolicited IP buffers we preallocate.
	 */

	mp = &hba->memseg[MEM_IPBUF];
	mp->fc_memsize = MEM_IPBUF_SIZE;
	mp->fc_numblks = MEM_IPBUF_COUNT;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = 3;
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer, we want page alignment so we
		 * don't have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "IP_BUF Segment buffer.");

			return (0);
		}
		matp = (MATCHMAP *) buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "IP_BUF DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer in
		 * each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}
		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_IPBUF;
		matp->flag |= MAP_POOL_ALLOCATED;
	}

	/*
	 * These represent the unsolicited CT buffers we preallocate.
	 */
	mp = &hba->memseg[MEM_CTBUF];
	mp->fc_memsize = MEM_CTBUF_SIZE;
	mp->fc_numblks = MEM_CTBUF_COUNT;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = 0;
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer, we want page alignment so we
		 * don't have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "CT_BUF Segment buffer.");

			return (0);
		}
		matp = (MATCHMAP *) buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "CT_BUF DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer in
		 * each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}
		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_CTBUF;
		matp->flag |= MAP_POOL_ALLOCATED;
	}

#ifdef SFCT_SUPPORT

	/*
	 * These represent the unsolicited FCT buffers we preallocate.
	 */
	mp = &hba->memseg[MEM_FCTBUF];
	mp->fc_memsize = MEM_FCTBUF_SIZE;
	mp->fc_numblks = (hba->tgt_mode) ? MEM_FCTBUF_COUNT : 0;
	mp->fc_memflag = FC_MEM_DMA;
	mp->fc_lowmem = 0;
	mp->fc_memstart_virt = 0;
	mp->fc_memstart_phys = 0;
	mp->fc_mem_dma_handle = 0;
	mp->fc_mem_dat_handle = 0;
	mp->fc_memget_ptr = 0;
	mp->fc_memget_end = 0;
	mp->fc_memput_ptr = 0;
	mp->fc_memput_end = 0;
	mp->fc_total_memsize = 0;
	mp->fc_memget_cnt = mp->fc_numblks;
	mp->fc_memput_cnt = 0;

	/* Allocate buffer pools for above buffer structures */
	for (i = 0; i < mp->fc_numblks; i++) {
		/*
		 * If this is a DMA buffer, we want page alignment so we
		 * don't have to worry about buffers spanning page
		 * boundaries when mapping memory for the adapter.
		 */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "FCT_BUF Segment buffer.");

			return (0);
		}
		matp = (MATCHMAP *) buf_info->virt;
		bzero(matp, sizeof (MATCHMAP));

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->fc_memsize;
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 32;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			mutex_exit(&EMLXS_MEMGET_LOCK);

			(void) emlxs_mem_free_buffer(hba);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "FCT_BUF DMA buffer.");

			return (0);
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, mp->fc_memsize);

		/*
		 * Link buffer into beginning of list. The first pointer in
		 * each buffer is a forward pointer to the next buffer.
		 */
		oldbp = mp->fc_memget_ptr;

		if (oldbp == 0) {
			mp->fc_memget_end = (uint8_t *)matp;
		}
		mp->fc_memget_ptr = (uint8_t *)matp;
		matp->fc_mptr = oldbp;
		matp->virt = buf_info->virt;
		matp->phys = buf_info->phys;
		matp->size = buf_info->size;
		matp->dma_handle = buf_info->dma_handle;
		matp->data_handle = buf_info->data_handle;
		matp->tag = MEM_FCTBUF;
		matp->flag |= MAP_POOL_ALLOCATED;
	}
#endif	/* SFCT_SUPPORT */

	for (i = 0; i < FC_MAX_SEG; i++) {
		char *seg;

		switch (i) {
		case MEM_NLP:
			seg = "MEM_NLP";
			break;
		case MEM_IOCB:
			seg = "MEM_IOCB";
			break;
		case MEM_MBOX:
			seg = "MEM_MBOX";
			break;
		case MEM_BPL:
			seg = "MEM_BPL";
			break;
		case MEM_BUF:
			seg = "MEM_BUF";
			break;
		case MEM_IPBUF:
			seg = "MEM_IPBUF";
			break;
		case MEM_CTBUF:
			seg = "MEM_CTBUF";
			break;
#ifdef SFCT_SUPPORT
		case MEM_FCTBUF:
			seg = "MEM_FCTBUF";
			break;
#endif	/* SFCT_SUPPORT */
		default:
			/* Don't log an uninitialized pointer below */
			seg = "Unknown";
			break;
		}

		mp = &hba->memseg[i];

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
		    "Segment: %s mp=%p size=%x count=%d flags=%x base=%p",
		    seg, mp, mp->fc_memsize, mp->fc_numblks, mp->fc_memflag,
		    mp->fc_memget_ptr);
	}

	mutex_exit(&EMLXS_MEMGET_LOCK);

	return (1);

} /* emlxs_mem_alloc_buffer() */
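
/*
 * Usage sketch (illustrative only): emlxs_mem_alloc_buffer() and
 * emlxs_mem_free_buffer() are called as a pair, once per HBA instance;
 * on failure the allocator has already cleaned up after itself:
 *
 *	if (emlxs_mem_alloc_buffer(hba) == 0) {
 *		// 0 means a pool could not be allocated; partial
 *		// allocations were already freed internally.
 *		return (DDI_FAILURE);	// hypothetical caller error path
 *	}
 *	...
 *	(void) emlxs_mem_free_buffer(hba);	// at detach/cleanup time
 */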



/*
 *   emlxs_mem_free_buffer
 *
 *   This routine frees iocb/data buffer space
 *   and TGTM resources.
 */
extern int
emlxs_mem_free_buffer(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	int32_t j;
	uint8_t *bp;
	MEMSEG *mp;
	MATCHMAP *mm;
	RING *rp;
	IOCBQ *iocbq;
	IOCB *iocb;
	MAILBOXQ *mbox, *mbsave;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;
	emlxs_buf_t *sbp;
	fc_unsol_buf_t *ubp;
	RING *fcp_rp;
	RING *ip_rp;
	RING *els_rp;
	RING *ct_rp;
	uint32_t total_iotags;
	emlxs_ub_priv_t *ub_priv;

	buf_info = &bufinfo;

	/* Check for deferred pkt completion */
	if (hba->mbox_sbp) {
		sbp = (emlxs_buf_t *)hba->mbox_sbp;
		hba->mbox_sbp = 0;

		emlxs_pkt_complete(sbp, -1, 0, 1);
	}
	/* Check for deferred ub completion */
	if (hba->mbox_ubp) {
		ubp = (fc_unsol_buf_t *)hba->mbox_ubp;
		ub_priv = (emlxs_ub_priv_t *)ubp->ub_fca_private;
		port = ub_priv->port;
		hba->mbox_ubp = 0;

		emlxs_ub_callback(port, ubp);
	}
	/* Check for deferred iocb tx */
	if (hba->mbox_iocbq) {	/* iocb */
		iocbq = (IOCBQ *) hba->mbox_iocbq;
		hba->mbox_iocbq = 0;
		iocb = &iocbq->iocb;

		/* Set the error status of the iocb */
		iocb->ulpStatus = IOSTAT_LOCAL_REJECT;
		iocb->un.grsp.perr.statLocalError = IOERR_ABORT_REQUESTED;

		switch (iocb->ulpCommand) {
		case CMD_FCP_ICMND_CR:
		case CMD_FCP_ICMND_CX:
		case CMD_FCP_IREAD_CR:
		case CMD_FCP_IREAD_CX:
		case CMD_FCP_IWRITE_CR:
		case CMD_FCP_IWRITE_CX:
		case CMD_FCP_ICMND64_CR:
		case CMD_FCP_ICMND64_CX:
		case CMD_FCP_IREAD64_CR:
		case CMD_FCP_IREAD64_CX:
		case CMD_FCP_IWRITE64_CR:
		case CMD_FCP_IWRITE64_CX:
			rp = &hba->ring[FC_FCP_RING];
			emlxs_handle_fcp_event(hba, rp, iocbq);
			break;

		case CMD_ELS_REQUEST_CR:
		case CMD_ELS_REQUEST_CX:
		case CMD_XMIT_ELS_RSP_CX:
		case CMD_ELS_REQUEST64_CR:	/* Currently the only one */
						/* used for deferred iocb tx */
		case CMD_ELS_REQUEST64_CX:
		case CMD_XMIT_ELS_RSP64_CX:
			rp = &hba->ring[FC_ELS_RING];
			(void) emlxs_els_handle_event(hba, rp, iocbq);
			break;

		case CMD_GEN_REQUEST64_CR:
		case CMD_GEN_REQUEST64_CX:
			rp = &hba->ring[FC_CT_RING];
			(void) emlxs_ct_handle_event(hba, rp, iocbq);
			break;

		default:
			rp = (RING *) iocbq->ring;

			if (rp) {
				if (rp->ringno == FC_ELS_RING) {
					(void) emlxs_mem_put(hba, MEM_ELSBUF,
					    (uint8_t *)iocbq->bp);
				} else if (rp->ringno == FC_CT_RING) {
					(void) emlxs_mem_put(hba, MEM_CTBUF,
					    (uint8_t *)iocbq->bp);
				} else if (rp->ringno == FC_IP_RING) {
					(void) emlxs_mem_put(hba, MEM_IPBUF,
					    (uint8_t *)iocbq->bp);
				}
#ifdef SFCT_SUPPORT
				else if (rp->ringno == FC_FCT_RING) {
					(void) emlxs_mem_put(hba, MEM_FCTBUF,
					    (uint8_t *)iocbq->bp);
				}
#endif	/* SFCT_SUPPORT */

			} else if (iocbq->bp) {
				(void) emlxs_mem_put(hba, MEM_BUF,
				    (uint8_t *)iocbq->bp);
			}
			if (!iocbq->sbp) {
				(void) emlxs_mem_put(hba, MEM_IOCB,
				    (uint8_t *)iocbq);
			}
		}
	}
	/* free the mapped address match area for each ring */
	for (j = 0; j < hba->ring_count; j++) {
		rp = &hba->ring[j];

		/* Flush the ring */
		(void) emlxs_tx_ring_flush(hba, rp, 0);

		while (rp->fc_mpoff) {
			uint64_t addr;

			addr = 0;
			mm = (MATCHMAP *) (rp->fc_mpoff);

			if ((j == FC_ELS_RING) ||
			    (j == FC_CT_RING) ||
#ifdef SFCT_SUPPORT
			    (j == FC_FCT_RING) ||
#endif	/* SFCT_SUPPORT */
			    (j == FC_IP_RING)) {
				addr = mm->phys;
			}
			if ((mm = emlxs_mem_get_vaddr(hba, rp, addr))) {
				if (j == FC_ELS_RING) {
					(void) emlxs_mem_put(hba, MEM_ELSBUF,
					    (uint8_t *)mm);
				} else if (j == FC_CT_RING) {
					(void) emlxs_mem_put(hba, MEM_CTBUF,
					    (uint8_t *)mm);
				} else if (j == FC_IP_RING) {
					(void) emlxs_mem_put(hba, MEM_IPBUF,
					    (uint8_t *)mm);
				}
#ifdef SFCT_SUPPORT
				else if (j == FC_FCT_RING) {
					(void) emlxs_mem_put(hba, MEM_FCTBUF,
					    (uint8_t *)mm);
				}
#endif	/* SFCT_SUPPORT */

			}
		}
	}

#ifdef SLI3_SUPPORT
	if (hba->flag & FC_HBQ_ENABLED) {
		emlxs_hbq_free_all(hba, EMLXS_ELS_HBQ_ID);
		emlxs_hbq_free_all(hba, EMLXS_IP_HBQ_ID);
		emlxs_hbq_free_all(hba, EMLXS_CT_HBQ_ID);
#ifdef SFCT_SUPPORT
		if (hba->tgt_mode) {
			emlxs_hbq_free_all(hba, EMLXS_FCT_HBQ_ID);
		}
#endif	/* SFCT_SUPPORT */

	}
#endif	/* SLI3_SUPPORT */

	/* Free everything on mbox queue */
	mbox = (MAILBOXQ *) (hba->mbox_queue.q_first);
	while (mbox) {
		mbsave = mbox;
		mbox = (MAILBOXQ *) mbox->next;
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbsave);
	}
	hba->mbox_queue.q_first = NULL;
	hba->mbox_queue.q_last = NULL;
	hba->mbox_queue.q_cnt = 0;
	hba->mbox_queue_flag = 0;

	/* Free the nodes */
	for (j = 0; j < MAX_VPORTS; j++) {
		vport = &VPORT(j);
		if (vport->node_count) {
			emlxs_node_destroy_all(vport);
		}
	}

	/* Free memory associated with all buffers on get buffer pool */
	if (hba->iotag_table) {
		fcp_rp = &hba->ring[FC_FCP_RING];
		ip_rp = &hba->ring[FC_IP_RING];
		els_rp = &hba->ring[FC_ELS_RING];
		ct_rp = &hba->ring[FC_CT_RING];

		total_iotags = fcp_rp->max_iotag + ip_rp->max_iotag +
		    els_rp->max_iotag + ct_rp->max_iotag;

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = total_iotags * sizeof (emlxs_buf_t *);
		buf_info->virt = hba->iotag_table;
		emlxs_mem_free(hba, buf_info);

		hba->iotag_table = 0;
	}
#ifdef EMLXS_SPARC
	if (hba->fcp_bpl_table) {
		/* fcp_rp may not be set if iotag_table was never allocated */
		fcp_rp = &hba->ring[FC_FCP_RING];

		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = fcp_rp->max_iotag * sizeof (MATCHMAP);
		buf_info->virt = hba->fcp_bpl_table;
		emlxs_mem_free(hba, buf_info);

		hba->fcp_bpl_table = 0;
	}
	if (hba->fcp_bpl_mp.virt) {
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = hba->fcp_bpl_mp.size;
		buf_info->virt = hba->fcp_bpl_mp.virt;
		buf_info->phys = hba->fcp_bpl_mp.phys;
		buf_info->dma_handle = hba->fcp_bpl_mp.dma_handle;
		buf_info->data_handle = hba->fcp_bpl_mp.data_handle;
		buf_info->flags = FC_MBUF_DMA;
		emlxs_mem_free(hba, buf_info);

		bzero(&hba->fcp_bpl_mp, sizeof (MATCHMAP));
	}
#endif	/* EMLXS_SPARC */

	/* Free the memory segments */
	for (j = 0; j < FC_MAX_SEG; j++) {
		mp = &hba->memseg[j];

		/* MEM_NLP, MEM_IOCB, MEM_MBOX */
		if (j < MEM_BPL) {
			if (mp->fc_memstart_virt) {
				kmem_free(mp->fc_memstart_virt,
				    mp->fc_total_memsize);
				bzero((char *)mp, sizeof (MEMSEG));
			}
			continue;
		}
		/*
		 * MEM_BPL, MEM_BUF, MEM_ELSBUF, MEM_IPBUF, MEM_CTBUF,
		 * MEM_FCTBUF
		 */

		/* Free memory associated with all buffers on get buffer pool */
		mutex_enter(&EMLXS_MEMGET_LOCK);
		while ((bp = mp->fc_memget_ptr) != NULL) {
			mp->fc_memget_ptr = *((uint8_t **)bp);
			mm = (MATCHMAP *) bp;

			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = mm->size;
			buf_info->virt = mm->virt;
			buf_info->phys = mm->phys;
			buf_info->dma_handle = mm->dma_handle;
			buf_info->data_handle = mm->data_handle;
			buf_info->flags = FC_MBUF_DMA;
			emlxs_mem_free(hba, buf_info);

			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = sizeof (MATCHMAP);
			buf_info->virt = (uint32_t *)mm;
			emlxs_mem_free(hba, buf_info);
		}
		mutex_exit(&EMLXS_MEMGET_LOCK);

		/* Free memory associated with all buffers on put buffer pool */
		mutex_enter(&EMLXS_MEMPUT_LOCK);
		while ((bp = mp->fc_memput_ptr) != NULL) {
			mp->fc_memput_ptr = *((uint8_t **)bp);
			mm = (MATCHMAP *) bp;

			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = mm->size;
			buf_info->virt = mm->virt;
			buf_info->phys = mm->phys;
			buf_info->dma_handle = mm->dma_handle;
			buf_info->data_handle = mm->data_handle;
			buf_info->flags = FC_MBUF_DMA;
			emlxs_mem_free(hba, buf_info);

			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = sizeof (MATCHMAP);
			buf_info->virt = (uint32_t *)mm;
			emlxs_mem_free(hba, buf_info);
		}
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		bzero((char *)mp, sizeof (MEMSEG));
	}

	return (0);

} /* emlxs_mem_free_buffer() */


extern uint8_t *
emlxs_mem_buf_alloc(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint8_t *bp = NULL;
	MATCHMAP *matp = NULL;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;

	buf_info = &bufinfo;

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = sizeof (MATCHMAP);
	buf_info->align = sizeof (void *);

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "MEM_BUF_ALLOC buffer.");

		return (0);
	}
	matp = (MATCHMAP *) buf_info->virt;
	bzero(matp, sizeof (MATCHMAP));

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = MEM_BUF_SIZE;
	buf_info->flags = FC_MBUF_DMA;
	buf_info->align = 32;

	(void) emlxs_mem_alloc(hba, buf_info);
	if (buf_info->virt == NULL) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
		    "MEM_BUF_ALLOC DMA buffer.");

		/* Free the MATCHMAP allocated above so it is not leaked */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->virt = (uint32_t *)matp;
		emlxs_mem_free(hba, buf_info);

		return (0);
	}
	bp = (uint8_t *)buf_info->virt;
	bzero(bp, MEM_BUF_SIZE);

	matp->fc_mptr = NULL;
	matp->virt = buf_info->virt;
	matp->phys = buf_info->phys;
	matp->size = buf_info->size;
	matp->dma_handle = buf_info->dma_handle;
	matp->data_handle = buf_info->data_handle;
	matp->tag = MEM_BUF;
	matp->flag |= MAP_BUF_ALLOCATED;

	return ((uint8_t *)matp);

} /* emlxs_mem_buf_alloc() */


extern uint8_t *
emlxs_mem_buf_free(emlxs_hba_t *hba, uint8_t *bp)
{
	MATCHMAP *matp;
	MBUF_INFO bufinfo;
	MBUF_INFO *buf_info;

	buf_info = &bufinfo;

	matp = (MATCHMAP *) bp;

	if (!(matp->flag & MAP_BUF_ALLOCATED)) {
		return (NULL);
	}

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = matp->size;
	buf_info->virt = matp->virt;
	buf_info->phys = matp->phys;
	buf_info->dma_handle = matp->dma_handle;
	buf_info->data_handle = matp->data_handle;
	buf_info->flags = FC_MBUF_DMA;
	emlxs_mem_free(hba, buf_info);

	bzero(buf_info, sizeof (MBUF_INFO));
	buf_info->size = sizeof (MATCHMAP);
	buf_info->virt = (uint32_t *)matp;
	emlxs_mem_free(hba, buf_info);

	return (bp);

} /* emlxs_mem_buf_free() */
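
/*
 * Usage sketch (illustrative only): emlxs_mem_buf_alloc() returns a
 * MATCHMAP pointer cast to uint8_t *; the adapter-visible DMA address
 * is matp->phys and the kernel-visible mapping is matp->virt:
 *
 *	MATCHMAP *matp;
 *
 *	if ((matp = (MATCHMAP *)emlxs_mem_buf_alloc(hba)) == NULL) {
 *		return (1);	// hypothetical caller error path
 *	}
 *	// ... post matp->phys to the hardware, access data via matp->virt ...
 *	(void) emlxs_mem_buf_free(hba, (uint8_t *)matp);
 */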



/*
 *   emlxs_mem_get
 *
 *   This routine will get a free memory buffer.
 *   seg identifies which buffer pool to use.
 *   Returns the free buffer ptr or NULL if none is available.
 */
extern uint8_t *
emlxs_mem_get(emlxs_hba_t *hba, uint32_t arg)
{
	emlxs_port_t *port = &PPORT;
	MEMSEG *mp;
	uint8_t *bp = NULL;
	uint32_t seg = arg & MEM_SEG_MASK;
	MAILBOXQ *mbq;
	MATCHMAP *matp;
	IOCBQ *iocbq;
	NODELIST *node;
	uint8_t *base;
	uint8_t *end;

	/* Range check on seg argument */
	if (seg >= FC_MAX_SEG) {
		return (NULL);
	}
	mp = &hba->memseg[seg];

	/* Check if memory segment destroyed! */
	if (mp->fc_memsize == 0) {
		return (NULL);
	}
	mutex_enter(&EMLXS_MEMGET_LOCK);

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, "mem_get[%d]:
	 * memget=%p,%d  memput=%p,%d", seg, mp->fc_memget_ptr,
	 * mp->fc_memget_cnt, mp->fc_memput_ptr, mp->fc_memput_cnt);
	 */

top:

	if (mp->fc_memget_ptr) {
		bp = mp->fc_memget_ptr;

		/*
		 * Checking (seg == MEM_MBOX || seg == MEM_IOCB || seg ==
		 * MEM_NLP)
		 */
		/* Verify buffer is in this memory region */
		if (mp->fc_memstart_virt && mp->fc_total_memsize) {
			base = mp->fc_memstart_virt;
			end = mp->fc_memstart_virt + mp->fc_total_memsize;
			if (bp < base || bp >= end) {
				/* Invalidate the get list */
				mp->fc_memget_ptr = NULL;
				mp->fc_memget_end = NULL;
				mp->fc_memget_cnt = 0;

				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
				    "Corruption detected: seg=%x bp=%p "
				    "base=%p end=%p.", seg, bp, base, end);

				emlxs_ffstate_change(hba, FC_ERROR);

				mutex_exit(&EMLXS_MEMGET_LOCK);

				(void) thread_create(NULL, 0,
				    emlxs_shutdown_thread,
				    (char *)hba, 0, &p0, TS_RUN,
				    v.v_maxsyspri - 2);

				return (NULL);
			}
		}
		/*
		 * If a memory block exists, take it off freelist and return
		 * it to the user.
		 */
		if (mp->fc_memget_end == bp) {
			mp->fc_memget_ptr = NULL;
			mp->fc_memget_end = NULL;
			mp->fc_memget_cnt = 0;

		} else {
			/*
			 * Pointer to the next free buffer
			 */
			mp->fc_memget_ptr = *((uint8_t **)bp);
			mp->fc_memget_cnt--;
		}

		switch (seg) {
		case MEM_MBOX:
			bzero(bp, sizeof (MAILBOXQ));

			mbq = (MAILBOXQ *) bp;
			mbq->flag |= MBQ_POOL_ALLOCATED;
			break;

		case MEM_IOCB:
			bzero(bp, sizeof (IOCBQ));

			iocbq = (IOCBQ *) bp;
			iocbq->flag |= IOCB_POOL_ALLOCATED;
			break;

		case MEM_NLP:
			bzero(bp, sizeof (NODELIST));

			node = (NODELIST *) bp;
			node->flag |= NODE_POOL_ALLOCATED;
			break;

		case MEM_BPL:
		case MEM_BUF:	/* MEM_ELSBUF */
		case MEM_IPBUF:
		case MEM_CTBUF:
#ifdef SFCT_SUPPORT
		case MEM_FCTBUF:
#endif	/* SFCT_SUPPORT */
		default:
			matp = (MATCHMAP *) bp;
			matp->fc_mptr = NULL;
			matp->flag |= MAP_POOL_ALLOCATED;
			break;
		}
	} else {
		mutex_enter(&EMLXS_MEMPUT_LOCK);
		if (mp->fc_memput_ptr) {
			/*
			 * Move buffer from memput to memget
			 */
			mp->fc_memget_ptr = mp->fc_memput_ptr;
			mp->fc_memget_end = mp->fc_memput_end;
			mp->fc_memget_cnt = mp->fc_memput_cnt;
			mp->fc_memput_ptr = NULL;
			mp->fc_memput_end = NULL;
			mp->fc_memput_cnt = 0;
			mutex_exit(&EMLXS_MEMPUT_LOCK);

			goto top;
		}
		mutex_exit(&EMLXS_MEMPUT_LOCK);

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_alloc_failed_msg,
		    "Pool empty: seg=%x lowmem=%x free=%x",
		    seg, mp->fc_lowmem, mp->fc_memget_cnt);

		/* HBASTATS.memAllocErr++; */
	}

	/*
	 * bp2 = mp->fc_memget_ptr;
	 *
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, "mem_get[%d]-:
	 * memget=%p,%d  memput=%p,%d >%x", seg, mp->fc_memget_ptr,
	 * mp->fc_memget_cnt, mp->fc_memput_ptr, mp->fc_memput_cnt, ((bp2)?
	 * *((uint8_t **) bp2):0));
	 */

	mutex_exit(&EMLXS_MEMGET_LOCK);

	return (bp);

} /* emlxs_mem_get() */
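
/*
 * Usage sketch (illustrative only): callers borrow an object from a pool
 * with emlxs_mem_get() and return it with emlxs_mem_put(), passing the
 * same segment id both times:
 *
 *	MAILBOXQ *mbq;
 *
 *	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX)) == NULL) {
 *		return (1);	// pool empty; hypothetical error path
 *	}
 *	// ... build and issue the mailbox command ...
 *	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mbq);
 */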



extern uint8_t *
emlxs_mem_put(emlxs_hba_t *hba, uint32_t seg, uint8_t *bp)
{
	emlxs_port_t *port = &PPORT;
	MEMSEG *mp;
	uint8_t *oldbp;
	MATCHMAP *matp;
	IOCBQ *iocbq;
	MAILBOXQ *mbq;
	NODELIST *node;
	uint8_t *base;
	uint8_t *end;

	if (!bp) {
		return (NULL);
	}
	/* Check on seg argument */
	if (seg >= FC_MAX_SEG) {
		return (NULL);
	}
	mp = &hba->memseg[seg];

	switch (seg) {
	case MEM_MBOX:
		mbq = (MAILBOXQ *) bp;

		if (!(mbq->flag & MBQ_POOL_ALLOCATED)) {
			return (bp);
		}
		break;

	case MEM_IOCB:
		iocbq = (IOCBQ *) bp;

		/* Check to make sure the IOCB is pool allocated */
		if (!(iocbq->flag & IOCB_POOL_ALLOCATED)) {
			return (bp);
		}
		/*
		 * Any IOCBQ with a packet attached did not come from our
		 * pool
		 */
		if (iocbq->sbp) {
			return (bp);
		}
		break;

	case MEM_NLP:
		node = (NODELIST *) bp;

		/* Check to make sure the NODE is pool allocated */
		if (!(node->flag & NODE_POOL_ALLOCATED)) {
			return (bp);
		}
		break;

	case MEM_BPL:
	case MEM_BUF:	/* MEM_ELSBUF */
	case MEM_IPBUF:
	case MEM_CTBUF:
#ifdef SFCT_SUPPORT
	case MEM_FCTBUF:
#endif	/* SFCT_SUPPORT */
	default:
		matp = (MATCHMAP *) bp;

		if (matp->flag & MAP_BUF_ALLOCATED) {
			return (emlxs_mem_buf_free(hba, bp));
		}
		if (matp->flag & MAP_TABLE_ALLOCATED) {
			return (bp);
		}
		/* Check to make sure the MATCHMAP is pool allocated */
		if (!(matp->flag & MAP_POOL_ALLOCATED)) {
			return (bp);
		}
		break;
	}

	/* Free the pool object */
	mutex_enter(&EMLXS_MEMPUT_LOCK);

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, "mem_put[%d]:
	 * memget=%p,%d  memput=%p,%d", seg, mp->fc_memget_ptr,
	 * mp->fc_memget_cnt, mp->fc_memput_ptr, mp->fc_memput_cnt);
	 */

	/* Check if memory segment destroyed! */
	if (mp->fc_memsize == 0) {
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return (NULL);
	}
	/* Check if buffer was just freed */
	if (mp->fc_memput_ptr == bp) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "Freeing Free object: seg=%x bp=%p", seg, bp);

		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return (NULL);
	}
	/* Validate the buffer */

	/*
	 * Checking (seg == MEM_BUF) || (seg == MEM_BPL) || (seg ==
	 * MEM_CTBUF) || (seg == MEM_IPBUF) || (seg == MEM_FCTBUF)
	 */
	if (mp->fc_memflag & FC_MEM_DMA) {
		if (matp->tag != seg) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
			    "Corruption detected: seg=%x tag=%x bp=%p",
			    seg, matp->tag, bp);

			emlxs_ffstate_change(hba, FC_ERROR);

			mutex_exit(&EMLXS_MEMPUT_LOCK);

			(void) thread_create(NULL, 0, emlxs_shutdown_thread,
			    (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);

			return (NULL);
		}
	}
	/* Checking (seg == MEM_MBOX || seg == MEM_IOCB || seg == MEM_NLP) */
	else if (mp->fc_memstart_virt && mp->fc_total_memsize) {
		base = mp->fc_memstart_virt;
		end = mp->fc_memstart_virt + mp->fc_total_memsize;
		if (bp < base || bp >= end) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
			    "Corruption detected: seg=%x bp=%p base=%p end=%p",
			    seg, bp, base, end);

			emlxs_ffstate_change(hba, FC_ERROR);

			mutex_exit(&EMLXS_MEMPUT_LOCK);

			(void) thread_create(NULL, 0, emlxs_shutdown_thread,
			    (char *)hba, 0, &p0, TS_RUN, v.v_maxsyspri - 2);

			return (NULL);
		}
	}
	/* Release to the first place of the freelist */
	oldbp = mp->fc_memput_ptr;
	mp->fc_memput_ptr = bp;
	*((uint8_t **)bp) = oldbp;

	if (oldbp == NULL) {
		mp->fc_memput_end = bp;
		mp->fc_memput_cnt = 1;
	} else {
		mp->fc_memput_cnt++;
	}

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg, "mem_put[%d]-:
	 * memget=%p,%d  memput=%p,%d", seg, mp->fc_memget_ptr,
	 * mp->fc_memget_cnt, mp->fc_memput_ptr, mp->fc_memput_cnt);
	 */

	mutex_exit(&EMLXS_MEMPUT_LOCK);

	return (bp);

} /* emlxs_mem_put() */



/*
 * Look up the virtual address given a mapped address
 */
extern MATCHMAP *
emlxs_mem_get_vaddr(emlxs_hba_t *hba, RING *rp, uint64_t mapbp)
{
	emlxs_port_t *port = &PPORT;
	MATCHMAP *prev;
	MATCHMAP *mp;

	switch (rp->ringno) {
	case FC_ELS_RING:
		mp = (MATCHMAP *) rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}
				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.ElsUbPosted--;

				return (mp);
			}
			prev = mp;
			mp = (MATCHMAP *) mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "ELS Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;

	case FC_CT_RING:
		mp = (MATCHMAP *) rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}
				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.CtUbPosted--;

				return (mp);
			}
			prev = mp;
			mp = (MATCHMAP *) mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "CT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;

	case FC_IP_RING:
		mp = (MATCHMAP *) rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}
				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.IpUbPosted--;

				return (mp);
			}
			prev = mp;
			mp = (MATCHMAP *) mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "IP Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;

#ifdef SFCT_SUPPORT
	case FC_FCT_RING:
		mp = (MATCHMAP *) rp->fc_mpoff;
		prev = 0;

		while (mp) {
			if (mp->phys == mapbp) {
				if (prev == 0) {
					rp->fc_mpoff = mp->fc_mptr;
				} else {
					prev->fc_mptr = mp->fc_mptr;
				}

				if (rp->fc_mpon == (uint8_t *)mp) {
					rp->fc_mpon = (uint8_t *)prev;
				}
				mp->fc_mptr = 0;

				emlxs_mpdata_sync(mp->dma_handle, 0, mp->size,
				    DDI_DMA_SYNC_FORKERNEL);

				HBASTATS.FctUbPosted--;

				return (mp);
			}
			prev = mp;
			mp = (MATCHMAP *) mp->fc_mptr;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "FCT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);

		break;
#endif	/* SFCT_SUPPORT */
	}

	return (0);

} /* emlxs_mem_get_vaddr() */


/*
 * Given a virtual address, bp, generate the physical mapped address and place
 * it where addr points to. Save the address pair for lookup later.
 */
extern void
emlxs_mem_map_vaddr(emlxs_hba_t *hba, RING *rp, MATCHMAP *mp, uint32_t *haddr,
    uint32_t *laddr)
{
	switch (rp->ringno) {
	case FC_ELS_RING:
		/*
		 * Update the slot fc_mpon points to, then bump it.
		 * fc_mpoff is the head pointer of the list; fc_mpon is
		 * the tail pointer of the list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *) (rp->fc_mpon))->fc_mptr = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = (uint32_t)putPaddrHigh(mp->phys);
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		} else {
			/* return mapped address */
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		}

		HBASTATS.ElsUbPosted++;

		break;

	case FC_CT_RING:
		/*
		 * Update the slot fc_mpon points to, then bump it.
		 * fc_mpoff is the head pointer of the list; fc_mpon is
		 * the tail pointer of the list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *) (rp->fc_mpon))->fc_mptr = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = (uint32_t)putPaddrHigh(mp->phys);
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		} else {
			/* return mapped address */
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		}

		HBASTATS.CtUbPosted++;

		break;


	case FC_IP_RING:
		/*
		 * Update the slot fc_mpon points to, then bump it.
		 * fc_mpoff is the head pointer of the list; fc_mpon is
		 * the tail pointer of the list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *) (rp->fc_mpon))->fc_mptr = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = (uint32_t)putPaddrHigh(mp->phys);
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		} else {
			/* return mapped address */
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		}

		HBASTATS.IpUbPosted++;
		break;


#ifdef SFCT_SUPPORT
	case FC_FCT_RING:
		/*
		 * Update the slot fc_mpon points to, then bump it.
		 * fc_mpoff is the head pointer of the list; fc_mpon is
		 * the tail pointer of the list.
		 */
		mp->fc_mptr = 0;
		if (rp->fc_mpoff == 0) {
			rp->fc_mpoff = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		} else {
			((MATCHMAP *) (rp->fc_mpon))->fc_mptr = (uint8_t *)mp;
			rp->fc_mpon = (uint8_t *)mp;
		}

		if (hba->flag & FC_SLIM2_MODE) {
			/* return mapped address */
			*haddr = (uint32_t)putPaddrHigh(mp->phys);
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		} else {
			/* return mapped address */
			*laddr = (uint32_t)putPaddrLow(mp->phys);
		}

		HBASTATS.FctUbPosted++;
		break;
#endif	/* SFCT_SUPPORT */
	}
} /* emlxs_mem_map_vaddr() */
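
/*
 * Illustrative pairing (not driver code): emlxs_mem_map_vaddr() records a
 * virtual/DMA address pair when a buffer is posted to the adapter, and
 * emlxs_mem_get_vaddr() recovers the MATCHMAP when the adapter returns
 * that DMA address in a completion:
 *
 *	uint32_t haddr, laddr;
 *
 *	emlxs_mem_map_vaddr(hba, rp, mp, &haddr, &laddr);	// post buffer
 *	// ... adapter later hands back the 64-bit address mapbp ...
 *	mp = emlxs_mem_get_vaddr(hba, rp, mapbp);		// on completion
 */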


#ifdef SLI3_SUPPORT

static uint32_t
emlxs_hbq_alloc(emlxs_hba_t *hba, uint32_t hbq_id)
{
	emlxs_port_t *port = &PPORT;
	HBQ_INIT_t *hbq;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;

	hbq = &hba->hbq_table[hbq_id];

	if (hbq->HBQ_host_buf.virt == 0) {
		buf_info = &bufinfo;

		/* Allocate a 4K-aligned DMA buffer for the HBQ entries */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = hbq->HBQ_numEntries * sizeof (HBQE_t);
		buf_info->flags = FC_MBUF_DMA;
		buf_info->align = 4096;

		(void) emlxs_mem_alloc(hba, buf_info);

		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
			    "Unable to alloc HBQ.");
			return (ENOMEM);
		}
		hbq->HBQ_host_buf.virt = (void *) buf_info->virt;
		hbq->HBQ_host_buf.phys = buf_info->phys;
		hbq->HBQ_host_buf.data_handle = buf_info->data_handle;
		hbq->HBQ_host_buf.dma_handle = buf_info->dma_handle;
		hbq->HBQ_host_buf.size = buf_info->size;
		hbq->HBQ_host_buf.tag = hbq_id;

		bzero((char *)hbq->HBQ_host_buf.virt, buf_info->size);
	}
	return (0);

} /* emlxs_hbq_alloc() */


extern uint32_t
emlxs_hbq_setup(emlxs_hba_t *hba, uint32_t hbq_id)
{
	emlxs_port_t *port = &PPORT;
	HBQ_INIT_t *hbq;
	MATCHMAP *mp;
	HBQE_t *hbqE;
	MAILBOX *mb;
	void *ioa2;
	uint32_t j;
	uint32_t count;
	uint32_t size;
	uint32_t ringno;
	uint32_t seg;

	switch (hbq_id) {
	case EMLXS_ELS_HBQ_ID:
		count = MEM_ELSBUF_COUNT;
		size = MEM_ELSBUF_SIZE;
		ringno = FC_ELS_RING;
		seg = MEM_ELSBUF;
		HBASTATS.ElsUbPosted = count;
		break;

	case EMLXS_IP_HBQ_ID:
		count = MEM_IPBUF_COUNT;
		size = MEM_IPBUF_SIZE;
		ringno = FC_IP_RING;
		seg = MEM_IPBUF;
		HBASTATS.IpUbPosted = count;
		break;

	case EMLXS_CT_HBQ_ID:
		count = MEM_CTBUF_COUNT;
		size = MEM_CTBUF_SIZE;
		ringno = FC_CT_RING;
		seg = MEM_CTBUF;
		HBASTATS.CtUbPosted = count;
		break;

#ifdef SFCT_SUPPORT
	case EMLXS_FCT_HBQ_ID:
		count = MEM_FCTBUF_COUNT;
		size = MEM_FCTBUF_SIZE;
		ringno = FC_FCT_RING;
		seg = MEM_FCTBUF;
		HBASTATS.FctUbPosted = count;
		break;
#endif	/* SFCT_SUPPORT */

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
		    "emlxs_hbq_setup: Invalid HBQ id. (%x)", hbq_id);
		return (1);
	}

	/* Configure HBQ */
	hbq = &hba->hbq_table[hbq_id];
	hbq->HBQ_numEntries = count;

	/* Get a Mailbox buffer to setup mailbox commands for CONFIG_HBQ */
	if ((mb = (MAILBOX *) emlxs_mem_get(hba, (MEM_MBOX | MEM_PRI))) == 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
		    "emlxs_hbq_setup: Unable to get mailbox.");
		return (1);
	}
	/* Allocate HBQ Host buffer and Initialize the HBQEs */
	if (emlxs_hbq_alloc(hba, hbq_id)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
		    "emlxs_hbq_setup: Unable to allocate HBQ.");
		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
		return (1);
	}
	hbq->HBQ_recvNotify = 1;
	hbq->HBQ_num_mask = 0;	/* Bind to ring */
	hbq->HBQ_profile = 0;	/* Selection profile 0=all, 7=logentry */
	hbq->HBQ_ringMask = 1 << ringno;	/* Binds HBQ to a ring, */
						/* e.g. Ring0=b0001, */
						/* Ring1=b0010, Ring2=b0100 */
	hbq->HBQ_headerLen = 0;	/* 0 if not profile 4 or 5 */
	hbq->HBQ_logEntry = 0;	/* Set to 1 if used for log entries */
	hbq->HBQ_id = hbq_id;
	hbq->HBQ_PutIdx_next = 0;
	hbq->HBQ_PutIdx = hbq->HBQ_numEntries - 1;
	hbq->HBQ_GetIdx = 0;
	hbq->HBQ_PostBufCnt = hbq->HBQ_numEntries;
	bzero(hbq->HBQ_PostBufs, sizeof (hbq->HBQ_PostBufs));

	/* Fill in POST BUFFERs in HBQE */
	hbqE = (HBQE_t *)hbq->HBQ_host_buf.virt;
	for (j = 0; j < hbq->HBQ_numEntries; j++, hbqE++) {
		/* Allocate buffer to post */
		if ((mp = (MATCHMAP *) emlxs_mem_get(hba, (seg | MEM_PRI))) ==
		    0) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
			    "emlxs_hbq_setup: Unable to allocate HBQ buffer. "
			    "cnt=%d", j);
			(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
			emlxs_hbq_free_all(hba, hbq_id);
			return (1);
		}
		hbq->HBQ_PostBufs[j] = mp;

		hbqE->unt.ext.HBQ_tag = hbq_id;
		hbqE->unt.ext.HBQE_tag = j;
		hbqE->bde.tus.f.bdeSize = size;
		hbqE->bde.tus.f.bdeFlags = 0;
		hbqE->unt.w = PCIMEM_LONG(hbqE->unt.w);
		hbqE->bde.tus.w = PCIMEM_LONG(hbqE->bde.tus.w);
		hbqE->bde.addrLow =
		    PCIMEM_LONG((uint32_t)putPaddrLow(mp->phys));
		hbqE->bde.addrHigh =
		    PCIMEM_LONG((uint32_t)putPaddrHigh(mp->phys));
	}

	/* Issue CONFIG_HBQ */
	emlxs_mb_config_hbq(hba, mb, hbq_id);
	if (emlxs_mb_issue_cmd(hba, mb, MBX_WAIT, 0) != MBX_SUCCESS) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_debug_msg,
		    "emlxs_hbq_setup: Unable to config HBQ. cmd=%x status=%x",
		    mb->mbxCommand, mb->mbxStatus);

		(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);
		emlxs_hbq_free_all(hba, hbq_id);
		return (1);
	}
	/* Setup HBQ Get/Put indexes */
	ioa2 = (void *) ((char *)hba->slim_addr + (hba->hgp_hbq_offset +
	    (hbq_id * sizeof (uint32_t))));
	WRITE_SLIM_ADDR(hba, (volatile uint32_t *) ioa2, hbq->HBQ_PutIdx);

	hba->hbq_count++;

	(void) emlxs_mem_put(hba, MEM_MBOX, (uint8_t *)mb);

	return (0);

} /* emlxs_hbq_setup */


static void
emlxs_hbq_free_all(emlxs_hba_t *hba, uint32_t hbq_id)
{
	HBQ_INIT_t *hbq;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;
	uint32_t seg;
	uint32_t j;

	switch (hbq_id) {
	case EMLXS_ELS_HBQ_ID:
		seg = MEM_ELSBUF;
		HBASTATS.ElsUbPosted = 0;
		break;

	case EMLXS_IP_HBQ_ID:
		seg = MEM_IPBUF;
		HBASTATS.IpUbPosted = 0;
		break;

	case EMLXS_CT_HBQ_ID:
		seg = MEM_CTBUF;
		HBASTATS.CtUbPosted = 0;
		break;

#ifdef SFCT_SUPPORT
	case EMLXS_FCT_HBQ_ID:
		seg = MEM_FCTBUF;
		HBASTATS.FctUbPosted = 0;
		break;
#endif	/* SFCT_SUPPORT */

	default:
		return;
	}


	hbq = &hba->hbq_table[hbq_id];

	if (hbq->HBQ_host_buf.virt != 0) {
		for (j = 0; j < hbq->HBQ_PostBufCnt; j++) {
			(void) emlxs_mem_put(hba, seg,
			    (uint8_t *)hbq->HBQ_PostBufs[j]);
			hbq->HBQ_PostBufs[j] = NULL;
		}
		hbq->HBQ_PostBufCnt = 0;

		buf_info = &bufinfo;
		bzero(buf_info, sizeof (MBUF_INFO));

		buf_info->size = hbq->HBQ_host_buf.size;
		buf_info->virt = hbq->HBQ_host_buf.virt;
		buf_info->phys = hbq->HBQ_host_buf.phys;
		buf_info->dma_handle = hbq->HBQ_host_buf.dma_handle;
		buf_info->data_handle = hbq->HBQ_host_buf.data_handle;
		buf_info->flags = FC_MBUF_DMA;

		emlxs_mem_free(hba, buf_info);

		hbq->HBQ_host_buf.virt = NULL;
	}
	return;

} /* emlxs_hbq_free_all() */


extern void
emlxs_update_HBQ_index(emlxs_hba_t *hba, uint32_t hbq_id)
{
	void *ioa2;
	uint32_t status;
	uint32_t HBQ_PortGetIdx;
	HBQ_INIT_t *hbq;

	switch (hbq_id) {
	case EMLXS_ELS_HBQ_ID:
		HBASTATS.ElsUbPosted++;
		break;

	case EMLXS_IP_HBQ_ID:
		HBASTATS.IpUbPosted++;
		break;

	case EMLXS_CT_HBQ_ID:
		HBASTATS.CtUbPosted++;
		break;

#ifdef SFCT_SUPPORT
	case EMLXS_FCT_HBQ_ID:
		HBASTATS.FctUbPosted++;
		break;
#endif	/* SFCT_SUPPORT */

	default:
		return;
	}

	hbq = &hba->hbq_table[hbq_id];

	hbq->HBQ_PutIdx = (hbq->HBQ_PutIdx + 1 >= hbq->HBQ_numEntries) ? 0 :
	    hbq->HBQ_PutIdx + 1;

	if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
		HBQ_PortGetIdx = PCIMEM_LONG(((SLIM2 *) hba->slim2.virt)->
		    mbx.us.s2.HBQ_PortGetIdx[hbq_id]);

		hbq->HBQ_GetIdx = HBQ_PortGetIdx;

		if (hbq->HBQ_PutIdx == hbq->HBQ_GetIdx) {
			return;
		}
	}
	ioa2 = (void *) ((char *)hba->slim_addr + (hba->hgp_hbq_offset +
	    (hbq_id * sizeof (uint32_t))));
	status = hbq->HBQ_PutIdx;
	WRITE_SLIM_ADDR(hba, (volatile uint32_t *) ioa2, status);

	return;

} /* emlxs_update_HBQ_index() */
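
/*
 * Illustrative note (not driver code): the put index above walks a
 * circular queue of HBQ_numEntries slots; the ternary wrap is the
 * equivalent modular form of:
 *
 *	put = (put + 1) % numEntries;
 *
 * and the queue is treated as full when advancing 'put' would land on
 * the adapter's current get index.
 */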

#endif	/* SLI3_SUPPORT */