1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at
9  * http://www.opensource.org/licenses/cddl1.txt.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2004-2011 Emulex. All rights reserved.
24  * Use is subject to license terms.
25  * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
26  * Copyright 2020 RackTop Systems, Inc.
27  */
28 
29 #include <emlxs.h>
30 
31 /* #define EMLXS_POOL_DEBUG */
32 
33 EMLXS_MSG_DEF(EMLXS_MEM_C);
34 
35 
36 static uint32_t emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg,
37 			uint32_t count);
38 static void emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count);
39 
40 
41 extern int32_t
emlxs_mem_alloc_buffer(emlxs_hba_t * hba)42 emlxs_mem_alloc_buffer(emlxs_hba_t *hba)
43 {
44 	emlxs_port_t *port = &PPORT;
45 	emlxs_config_t *cfg;
46 	MBUF_INFO *buf_info;
47 	MEMSEG *seg;
48 	MBUF_INFO bufinfo;
49 	int32_t i;
50 	MATCHMAP *mp;
51 	MATCHMAP **bpl_table;
52 
53 	buf_info = &bufinfo;
54 	cfg = &CFG;
55 
56 	bzero(hba->memseg, sizeof (hba->memseg));
57 
58 	/* Allocate the fc_table */
59 	bzero(buf_info, sizeof (MBUF_INFO));
60 	buf_info->size = (hba->max_iotag * sizeof (emlxs_buf_t *));
61 
62 	(void) emlxs_mem_alloc(hba, buf_info);
63 	if (buf_info->virt == NULL) {
64 
65 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
66 		    "fc_table buffer.");
67 
68 		goto failed;
69 	}
70 	hba->fc_table = buf_info->virt;
71 	bzero(hba->fc_table, buf_info->size);
72 
73 	/* Prepare the memory pools */
74 	for (i = 0; i < FC_MAX_SEG; i++) {
75 		seg = &hba->memseg[i];
76 
77 		switch (i) {
78 		case MEM_NLP:
79 			(void) strlcpy(seg->fc_label, "Node Pool",
80 			    sizeof (seg->fc_label));
81 			seg->fc_memtag	= MEM_NLP;
82 			seg->fc_memsize	= sizeof (NODELIST);
83 			seg->fc_hi_water = hba->max_nodes + 2;
84 			seg->fc_lo_water = 2;
85 			seg->fc_step = 1;
86 			break;
87 
88 		case MEM_IOCB:
89 			(void) strlcpy(seg->fc_label, "IOCB Pool",
90 			    sizeof (seg->fc_label));
91 			seg->fc_memtag	= MEM_IOCB;
92 			seg->fc_memsize	= sizeof (IOCBQ);
93 			seg->fc_hi_water = cfg[CFG_NUM_IOCBS].current;
94 			seg->fc_lo_water = cfg[CFG_NUM_IOCBS].low;
95 			seg->fc_step = cfg[CFG_NUM_IOCBS].low;
96 			break;
97 
98 		case MEM_MBOX:
99 			(void) strlcpy(seg->fc_label, "MBOX Pool",
100 			    sizeof (seg->fc_label));
101 			seg->fc_memtag	= MEM_MBOX;
102 			seg->fc_memsize	= sizeof (MAILBOXQ);
103 			seg->fc_hi_water = hba->max_nodes + 32;
104 			seg->fc_lo_water = 32;
105 			seg->fc_step = 1;
106 			break;
107 
108 		case MEM_BPL:
109 			if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
110 				continue;
111 			}
112 			(void) strlcpy(seg->fc_label, "BPL Pool",
113 			    sizeof (seg->fc_label));
114 			seg->fc_memtag	= MEM_BPL;
115 			seg->fc_memsize	= hba->sli.sli3.mem_bpl_size;
116 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
117 			seg->fc_memalign = 32;
118 			seg->fc_hi_water = hba->max_iotag;
119 			seg->fc_lo_water = cfg[CFG_NUM_IOCBS].low;
120 			seg->fc_step = cfg[CFG_NUM_IOCBS].low;
121 			break;
122 
123 		case MEM_BUF:
124 			/* These are the unsolicited ELS buffers. */
125 			(void) strlcpy(seg->fc_label, "BUF Pool",
126 			    sizeof (seg->fc_label));
127 			seg->fc_memtag	= MEM_BUF;
128 			seg->fc_memsize	= MEM_BUF_SIZE;
129 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
130 			seg->fc_memalign = 32;
131 			seg->fc_hi_water = MEM_ELSBUF_COUNT + MEM_BUF_COUNT;
132 			seg->fc_lo_water = MEM_ELSBUF_COUNT;
133 			seg->fc_step = 1;
134 			break;
135 
136 		case MEM_IPBUF:
137 			/* These are the unsolicited IP buffers. */
138 			if (cfg[CFG_NETWORK_ON].current == 0) {
139 				continue;
140 			}
141 
142 			(void) strlcpy(seg->fc_label, "IPBUF Pool",
143 			    sizeof (seg->fc_label));
144 			seg->fc_memtag	= MEM_IPBUF;
145 			seg->fc_memsize	= MEM_IPBUF_SIZE;
146 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
147 			seg->fc_memalign = 32;
148 			seg->fc_hi_water = MEM_IPBUF_COUNT;
149 			seg->fc_lo_water = 0;
150 			seg->fc_step = 4;
151 			break;
152 
153 		case MEM_CTBUF:
154 			/* These are the unsolicited CT buffers. */
155 			(void) strlcpy(seg->fc_label, "CTBUF Pool",
156 			    sizeof (seg->fc_label));
157 			seg->fc_memtag	= MEM_CTBUF;
158 			seg->fc_memsize	= MEM_CTBUF_SIZE;
159 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
160 			seg->fc_memalign = 32;
161 			seg->fc_hi_water = MEM_CTBUF_COUNT;
162 			seg->fc_lo_water = MEM_CTBUF_COUNT;
163 			seg->fc_step = 1;
164 			break;
165 
166 		case MEM_SGL1K:
167 			(void) strlcpy(seg->fc_label, "1K SGL Pool",
168 			    sizeof (seg->fc_label));
169 			seg->fc_memtag	= MEM_SGL1K;
170 			seg->fc_memsize	= 0x400;
171 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
172 			seg->fc_memalign = 32;
173 			seg->fc_hi_water = 0x5000;
174 			seg->fc_lo_water = 0;
175 			seg->fc_step = 0x100;
176 			break;
177 
178 		case MEM_SGL2K:
179 			(void) strlcpy(seg->fc_label, "2K SGL Pool",
180 			    sizeof (seg->fc_label));
181 			seg->fc_memtag	= MEM_SGL2K;
182 			seg->fc_memsize	= 0x800;
183 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
184 			seg->fc_memalign = 32;
185 			seg->fc_hi_water = 0x5000;
186 			seg->fc_lo_water = 0;
187 			seg->fc_step = 0x100;
188 			break;
189 
190 		case MEM_SGL4K:
191 			(void) strlcpy(seg->fc_label, "4K SGL Pool",
192 			    sizeof (seg->fc_label));
193 			seg->fc_memtag	= MEM_SGL4K;
194 			seg->fc_memsize	= 0x1000;
195 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
196 			seg->fc_memalign = 32;
197 			seg->fc_hi_water = 0x5000;
198 			seg->fc_lo_water = 0;
199 			seg->fc_step = 0x100;
200 			break;
201 
202 #ifdef SFCT_SUPPORT
203 		case MEM_FCTBUF:
204 			/* These are the unsolicited FCT buffers. */
205 			if (!(port->flag & EMLXS_TGT_ENABLED)) {
206 				continue;
207 			}
208 
209 			(void) strlcpy(seg->fc_label, "FCTBUF Pool",
210 			    sizeof (seg->fc_label));
211 			seg->fc_memtag	= MEM_FCTBUF;
212 			seg->fc_memsize	= MEM_FCTBUF_SIZE;
213 			seg->fc_memflag	= FC_MBUF_DMA | FC_MBUF_SNGLSG;
214 			seg->fc_memalign = 32;
215 			seg->fc_hi_water = MEM_FCTBUF_COUNT;
216 			seg->fc_lo_water = 0;
217 			seg->fc_step = 8;
218 			break;
219 #endif /* SFCT_SUPPORT */
220 
221 		default:
222 			continue;
223 		}
224 
225 		if (seg->fc_memsize == 0) {
226 			continue;
227 		}
228 
229 		(void) emlxs_mem_pool_create(hba, seg);
230 
231 		if (seg->fc_numblks < seg->fc_lo_water) {
232 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
233 			    "%s: count=%d size=%d flags=%x lo=%d hi=%d",
234 			    seg->fc_label, seg->fc_numblks,
235 			    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
236 			    seg->fc_hi_water);
237 
238 			goto failed;
239 		}
240 	}
241 
242 	hba->sli.sli3.bpl_table = NULL;
243 	seg = &hba->memseg[MEM_BPL];
244 
245 	/* If SLI3 and MEM_BPL pool is static */
246 	if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK) &&
247 	    !(seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
248 		/*
249 		 * Allocate and Initialize bpl_table
250 		 * This is for increased performance.
251 		 */
252 		bzero(buf_info, sizeof (MBUF_INFO));
253 		buf_info->size = hba->max_iotag * sizeof (MATCHMAP *);
254 
255 		(void) emlxs_mem_alloc(hba, buf_info);
256 		if (buf_info->virt == NULL) {
257 
258 			EMLXS_MSGF(EMLXS_CONTEXT,
259 			    &emlxs_mem_alloc_failed_msg,
260 			    "BPL table buffer.");
261 
262 			goto failed;
263 		}
264 		hba->sli.sli3.bpl_table = buf_info->virt;
265 
266 		bpl_table = (MATCHMAP**)hba->sli.sli3.bpl_table;
267 		for (i = 0; i < hba->max_iotag; i++) {
268 			mp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL);
269 			mp->flag |= MAP_TABLE_ALLOCATED;
270 			bpl_table[i] = mp;
271 		}
272 	}
273 
274 	return (1);
275 
276 failed:
277 
278 	(void) emlxs_mem_free_buffer(hba);
279 	return (0);
280 
281 } /* emlxs_mem_alloc_buffer() */
282 
283 
284 /*
285  * emlxs_mem_free_buffer
286  *
287  * This routine will free iocb/data buffer space
288  * and TGTM resource.
289  */
extern int
emlxs_mem_free_buffer(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_port_t *vport;
	int32_t j;
	MATCHMAP *mp;
	CHANNEL *cp;
	RING *rp;
	MBUF_INFO *buf_info;
	MBUF_INFO bufinfo;
	MATCHMAP **bpl_table;

	buf_info = &bufinfo;

	/* Flush any pending transmit queues on every channel */
	for (j = 0; j < hba->chan_count; j++) {
		cp = &hba->chan[j];

		/* Flush the ring */
		(void) emlxs_tx_channel_flush(hba, cp, 0);
	}

	if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK)) {
		/* free the mapped address match area for each ring */
		for (j = 0; j < MAX_RINGS; j++) {
			rp = &hba->sli.sli3.ring[j];

			while (rp->fc_mpoff) {
				uint64_t addr;

				/*
				 * For the unsolicited-buffer rings the
				 * entry is looked up by its physical
				 * address; other rings pass addr == 0.
				 */
				addr = 0;
				mp = (MATCHMAP *)(rp->fc_mpoff);

				if ((j == hba->channel_els) ||
				    (j == hba->channel_ct) ||
#ifdef SFCT_SUPPORT
				    (j == hba->CHANNEL_FCT) ||
#endif /* SFCT_SUPPORT */
				    (j == hba->channel_ip)) {
					addr = mp->phys;
				}

				/* Unmap the entry, then return the buffer */
				/* to the pool matching its channel */
				if ((mp = emlxs_mem_get_vaddr(hba, rp, addr))) {
					if (j == hba->channel_els) {
						emlxs_mem_put(hba,
						    MEM_ELSBUF, (void *)mp);
					} else if (j == hba->channel_ct) {
						emlxs_mem_put(hba,
						    MEM_CTBUF, (void *)mp);
					} else if (j == hba->channel_ip) {
						emlxs_mem_put(hba,
						    MEM_IPBUF, (void *)mp);
					}
#ifdef SFCT_SUPPORT
					else if (j == hba->CHANNEL_FCT) {
						emlxs_mem_put(hba,
						    MEM_FCTBUF, (void *)mp);
					}
#endif /* SFCT_SUPPORT */

				}
			}
		}
	}

	/* Release host buffer queues, if they were in use */
	if (hba->flag & FC_HBQ_ENABLED) {
		emlxs_hbq_free_all(hba, EMLXS_ELS_HBQ_ID);
		emlxs_hbq_free_all(hba, EMLXS_IP_HBQ_ID);
		emlxs_hbq_free_all(hba, EMLXS_CT_HBQ_ID);

		if (port->flag & EMLXS_TGT_ENABLED) {
			emlxs_hbq_free_all(hba, EMLXS_FCT_HBQ_ID);
		}
	}

	/* Free the nodes */
	for (j = 0; j < MAX_VPORTS; j++) {
		vport = &VPORT(j);
		if (vport->node_count) {
			emlxs_node_destroy_all(vport);
		}
	}

	/* Make sure the mailbox queue is empty */
	emlxs_mb_flush(hba);

	/* Free the fc_table allocated by emlxs_mem_alloc_buffer() */
	if (hba->fc_table) {
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = hba->max_iotag * sizeof (emlxs_buf_t *);
		buf_info->virt = hba->fc_table;
		emlxs_mem_free(hba, buf_info);
		hba->fc_table = NULL;
	}

	if (hba->sli.sli3.bpl_table) {
		/* Return MEM_BPLs to their pool */
		bpl_table = (MATCHMAP**)hba->sli.sli3.bpl_table;
		for (j = 0; j < hba->max_iotag; j++) {
			mp = bpl_table[j];
			mp->flag &= ~MAP_TABLE_ALLOCATED;
			emlxs_mem_put(hba, MEM_BPL, (void*)mp);
		}

		/* Free the table itself */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = hba->max_iotag * sizeof (MATCHMAP *);
		buf_info->virt = hba->sli.sli3.bpl_table;
		emlxs_mem_free(hba, buf_info);
		hba->sli.sli3.bpl_table = NULL;
	}

	/* Free the memory segments */
	/* (last, since the steps above return objects to these pools) */
	for (j = 0; j < FC_MAX_SEG; j++) {
		emlxs_mem_pool_destroy(hba, &hba->memseg[j]);
	}

	return (0);

} /* emlxs_mem_free_buffer() */
408 
409 
/* Must hold EMLXS_MEMGET_LOCK when calling */
/*
 * Grow a memory pool by up to 'count' objects, clipped so the pool
 * never exceeds seg->fc_hi_water.  DMA pools allocate a MATCHMAP
 * descriptor plus a DMA buffer per object; non-DMA pools allocate
 * plain virtual memory.  New objects are chained onto the tail of the
 * memget freelist (the first word of each object is the link pointer).
 * Returns the number of objects actually added.
 */
static uint32_t
emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count)
{
	emlxs_port_t *port = &PPORT;
	uint8_t *bp = NULL;
	MATCHMAP *mp = NULL;
	MBUF_INFO *buf_info;
	MBUF_INFO local_buf_info;
	uint32_t i;
	uint32_t fc_numblks;

	/* Unconfigured segment: nothing to do */
	if (seg->fc_memsize == 0) {
		return (0);
	}

	/* Pool is already full */
	if (seg->fc_numblks >= seg->fc_hi_water) {
		return (0);
	}

	if (count == 0) {
		return (0);
	}

	/* Clip the request to the remaining headroom */
	if (count > (seg->fc_hi_water - seg->fc_numblks)) {
		count = (seg->fc_hi_water - seg->fc_numblks);
	}

	buf_info = &local_buf_info;
	fc_numblks = seg->fc_numblks;	/* starting count, for the return */

	/* Check for initial allocation */
	if (!(seg->fc_memflag & FC_MEMSEG_PUT_ENABLED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
		    "%s alloc:%d n=%d s=%d f=%x l=%d,%d,%d "
		    "f=%d:%d",
		    seg->fc_label, count, seg->fc_numblks,
		    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
		    seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
		    seg->fc_low);
	}

	if (!(seg->fc_memflag & FC_MBUF_DMA)) {
		goto vmem_pool;
	}

/* dma_pool */

	for (i = 0; i < count; i++) {
		/* Allocate the MATCHMAP descriptor for this object */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "%s: count=%d size=%d",
			    seg->fc_label, seg->fc_numblks, seg->fc_memsize);

			goto done;
		}

		mp = (MATCHMAP *)buf_info->virt;
		bzero(mp, sizeof (MATCHMAP));

		/* Allocate the DMA buffer the descriptor will track */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size  = seg->fc_memsize;
		buf_info->flags = seg->fc_memflag;
		buf_info->align = seg->fc_memalign;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "%s: count=%d size=%d",
			    seg->fc_label, seg->fc_numblks, seg->fc_memsize);

			/* Free the mp object */
			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = sizeof (MATCHMAP);
			buf_info->virt = (void *)mp;
			emlxs_mem_free(hba, buf_info);

			goto done;
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, seg->fc_memsize);

		/* Record the DMA mapping in the descriptor */
		mp->virt = buf_info->virt;
		mp->phys = buf_info->phys;
		mp->size = buf_info->size;
		mp->dma_handle = buf_info->dma_handle;
		mp->data_handle = buf_info->data_handle;
		mp->tag = seg->fc_memtag;
		mp->segment = seg;
		mp->flag |= MAP_POOL_ALLOCATED;

#ifdef SFCT_SUPPORT
		/* FCT buffers also need an STMF data-buffer wrapper */
		if (mp->tag >= MEM_FCTSEG) {
			if (emlxs_fct_stmf_alloc(hba, mp)) {
				/* Free the DMA memory itself */
				emlxs_mem_free(hba, buf_info);

				/* Free the mp object */
				bzero(buf_info, sizeof (MBUF_INFO));
				buf_info->size = sizeof (MATCHMAP);
				buf_info->virt = (void *)mp;
				emlxs_mem_free(hba, buf_info);

				goto done;
			}
		}
#endif /* SFCT_SUPPORT */

		/* Add the buffer desc to the tail of the pool freelist */
		if (seg->fc_memget_end == NULL) {
			seg->fc_memget_ptr = (uint8_t *)mp;
			seg->fc_memget_cnt = 1;
		} else {
			*((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)mp;
			seg->fc_memget_cnt++;
		}
		seg->fc_memget_end = (uint8_t *)mp;

		seg->fc_numblks++;
		seg->fc_total_memsize += (seg->fc_memsize + sizeof (MATCHMAP));
	}

	goto done;

vmem_pool:

	for (i = 0; i < count; i++) {
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size  = seg->fc_memsize;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "%s: count=%d size=%d",
			    seg->fc_label, seg->fc_numblks, seg->fc_memsize);

			goto done;
		}
		bp = (uint8_t *)buf_info->virt;

		/* Add the buffer to the tail of the pool freelist */
		if (seg->fc_memget_end == NULL) {
			seg->fc_memget_ptr = (uint8_t *)bp;
			seg->fc_memget_cnt = 1;
		} else {
			*((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)bp;
			seg->fc_memget_cnt++;
		}
		seg->fc_memget_end = (uint8_t *)bp;

		seg->fc_numblks++;
		seg->fc_total_memsize += seg->fc_memsize;
	}

done:

	/* Objects actually added (may be fewer than requested) */
	return ((seg->fc_numblks - fc_numblks));

} /* emlxs_mem_pool_alloc() */
574 
575 
/* Must hold EMLXS_MEMGET_LOCK & EMLXS_MEMPUT_LOCK when calling */
/*
 * Shrink a memory pool by freeing up to 'count' objects back to the
 * system.  Only objects sitting on the freelists are freed; objects
 * currently checked out of the pool are untouched.  The memput list
 * is first migrated onto the memget list so all free objects are
 * reachable from a single list.
 */
static void
emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count)
{
	emlxs_port_t *port = &PPORT;
	uint8_t *bp = NULL;
	MATCHMAP *mp = NULL;
	MBUF_INFO *buf_info;
	MBUF_INFO local_buf_info;

	/* Nothing to free for an empty or unconfigured pool */
	if ((seg->fc_memsize == 0) ||
	    (seg->fc_numblks == 0) ||
	    (count == 0)) {
		return;
	}

	/* Check max count */
	if (count > seg->fc_numblks) {
		count = seg->fc_numblks;
	}

	/* Move memput list to memget list */
	if (seg->fc_memput_ptr) {
		if (seg->fc_memget_end == NULL) {
			seg->fc_memget_ptr = seg->fc_memput_ptr;
		} else {
			*((uint8_t **)(seg->fc_memget_end)) =\
			    seg->fc_memput_ptr;
		}
		seg->fc_memget_end = seg->fc_memput_end;
		seg->fc_memget_cnt += seg->fc_memput_cnt;

		seg->fc_memput_ptr = NULL;
		seg->fc_memput_end = NULL;
		seg->fc_memput_cnt = 0;
	}

	buf_info = &local_buf_info;

	/* Check for final deallocation */
	if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
		    "%s free:%d n=%d s=%d f=%x l=%d,%d,%d "
		    "f=%d:%d",
		    seg->fc_label, count, seg->fc_numblks,
		    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
		    seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
		    seg->fc_low);
	}

	if (!(seg->fc_memflag & FC_MBUF_DMA)) {
		goto vmem_pool;
	}

dma_pool:

	/* Free memory associated with all buffers on get buffer pool */
	while (count && ((bp = seg->fc_memget_ptr) != NULL)) {
		/* Remove buffer from list */
		if (seg->fc_memget_end == bp) {
			/* Last entry: the list is now empty */
			seg->fc_memget_ptr = NULL;
			seg->fc_memget_end = NULL;
			seg->fc_memget_cnt = 0;

		} else {
			/* First word of each entry links to the next */
			seg->fc_memget_ptr = *((uint8_t **)bp);
			seg->fc_memget_cnt--;
		}
		mp = (MATCHMAP *)bp;

#ifdef SFCT_SUPPORT
		/* Release the STMF wrapper allocated for FCT buffers */
		if (mp->tag >= MEM_FCTSEG) {
			emlxs_fct_stmf_free(hba, mp);
		}
#endif /* SFCT_SUPPORT */

		/* Free the DMA memory itself */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->size;
		buf_info->virt = mp->virt;
		buf_info->phys = mp->phys;
		buf_info->dma_handle = mp->dma_handle;
		buf_info->data_handle = mp->data_handle;
		buf_info->flags = seg->fc_memflag;
		emlxs_mem_free(hba, buf_info);

		/* Free the handle */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->virt = (void *)mp;
		emlxs_mem_free(hba, buf_info);

		seg->fc_numblks--;
		seg->fc_total_memsize -= (seg->fc_memsize + sizeof (MATCHMAP));

		count--;
	}

	return;

vmem_pool:

	/* Free memory associated with all buffers on get buffer pool */
	while (count && ((bp = seg->fc_memget_ptr) != NULL)) {
		/* Remove buffer from list */
		if (seg->fc_memget_end == bp) {
			seg->fc_memget_ptr = NULL;
			seg->fc_memget_end = NULL;
			seg->fc_memget_cnt = 0;

		} else {
			seg->fc_memget_ptr = *((uint8_t **)bp);
			seg->fc_memget_cnt--;
		}

		/* Free the Virtual memory itself */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = seg->fc_memsize;
		buf_info->virt = bp;
		emlxs_mem_free(hba, buf_info);

		seg->fc_numblks--;
		seg->fc_total_memsize -= seg->fc_memsize;

		count--;
	}

	return;

} /* emlxs_mem_pool_free() */
706 
707 
708 extern uint32_t
emlxs_mem_pool_create(emlxs_hba_t * hba,MEMSEG * seg)709 emlxs_mem_pool_create(emlxs_hba_t *hba, MEMSEG *seg)
710 {
711 	emlxs_config_t *cfg = &CFG;
712 
713 	mutex_enter(&EMLXS_MEMGET_LOCK);
714 	mutex_enter(&EMLXS_MEMPUT_LOCK);
715 
716 	if (seg->fc_memsize == 0) {
717 		mutex_exit(&EMLXS_MEMPUT_LOCK);
718 		mutex_exit(&EMLXS_MEMGET_LOCK);
719 
720 		return (0);
721 	}
722 
723 	/* Sanity check hi > lo */
724 	if (seg->fc_lo_water > seg->fc_hi_water) {
725 		seg->fc_hi_water = seg->fc_lo_water;
726 	}
727 
728 	/* If dynamic pools are disabled, then force pool to max level */
729 	if (cfg[CFG_MEM_DYNAMIC].current == 0) {
730 		seg->fc_lo_water = seg->fc_hi_water;
731 	}
732 
733 	/* If pool is dynamic, then fc_step must be >0 */
734 	/* Otherwise, fc_step must be 0 */
735 	if (seg->fc_lo_water != seg->fc_hi_water) {
736 		seg->fc_memflag |= FC_MEMSEG_DYNAMIC;
737 
738 		if (seg->fc_step == 0) {
739 			seg->fc_step = 1;
740 		}
741 	} else {
742 		seg->fc_step = 0;
743 	}
744 
745 	seg->fc_numblks = 0;
746 	seg->fc_total_memsize = 0;
747 	seg->fc_low = 0;
748 
749 	(void) emlxs_mem_pool_alloc(hba, seg, seg->fc_lo_water);
750 
751 	seg->fc_memflag |= (FC_MEMSEG_PUT_ENABLED|FC_MEMSEG_GET_ENABLED);
752 
753 	mutex_exit(&EMLXS_MEMPUT_LOCK);
754 	mutex_exit(&EMLXS_MEMGET_LOCK);
755 
756 	return (seg->fc_numblks);
757 
758 } /* emlxs_mem_pool_create() */
759 
760 
761 extern void
emlxs_mem_pool_destroy(emlxs_hba_t * hba,MEMSEG * seg)762 emlxs_mem_pool_destroy(emlxs_hba_t *hba, MEMSEG *seg)
763 {
764 	emlxs_port_t *port = &PPORT;
765 
766 	mutex_enter(&EMLXS_MEMGET_LOCK);
767 	mutex_enter(&EMLXS_MEMPUT_LOCK);
768 
769 	if (seg->fc_memsize == 0) {
770 		mutex_exit(&EMLXS_MEMPUT_LOCK);
771 		mutex_exit(&EMLXS_MEMGET_LOCK);
772 		return;
773 	}
774 
775 	/* Leave FC_MEMSEG_PUT_ENABLED set for now */
776 	seg->fc_memflag &= ~FC_MEMSEG_GET_ENABLED;
777 
778 	/* Try to free all objects */
779 	emlxs_mem_pool_free(hba, seg, seg->fc_numblks);
780 
781 	if (seg->fc_numblks) {
782 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
783 		    "mem_pool_destroy: %s leak detected: "
784 		    "%d objects still allocated.",
785 		    seg->fc_label, seg->fc_numblks);
786 	} else {
787 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
788 		    "mem_pool_destroy: %s destroyed.",
789 		    seg->fc_label);
790 
791 		/* Clear all */
792 		bzero(seg, sizeof (MEMSEG));
793 	}
794 
795 	mutex_exit(&EMLXS_MEMPUT_LOCK);
796 	mutex_exit(&EMLXS_MEMGET_LOCK);
797 
798 	return;
799 
800 } /* emlxs_mem_pool_destroy() */
801 
802 
/*
 * Trim a dynamic pool back toward its low water mark.  The amount
 * trimmed is based on seg->fc_low, the minimum free level observed
 * since the last clean, minus a small free pad.
 */
extern void
emlxs_mem_pool_clean(emlxs_hba_t *hba, MEMSEG *seg)
{
	emlxs_port_t *port = &PPORT;
	uint32_t clean_count;
	uint32_t free_count;
	uint32_t free_pad;

	mutex_enter(&EMLXS_MEMGET_LOCK);
	mutex_enter(&EMLXS_MEMPUT_LOCK);

	/* Static pools are never trimmed */
	if (!(seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		mutex_exit(&EMLXS_MEMGET_LOCK);
		return;
	}

	if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		goto done;
	}

#ifdef EMLXS_POOL_DEBUG
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
	    "%s clean: n=%d s=%d f=%x l=%d,%d,%d "
	    "f=%d:%d",
	    seg->fc_label, seg->fc_numblks,
	    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
	    seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
	    seg->fc_low);
#endif /* EMLXS_POOL_DEBUG */

	/* Calculate current free count */
	free_count = (seg->fc_memget_cnt + seg->fc_memput_cnt);

	/* Reset fc_low value to current free count */
	clean_count = seg->fc_low;
	seg->fc_low = free_count;

	/* Return if pool is already at lo water mark */
	if (seg->fc_numblks <= seg->fc_lo_water) {
		goto done;
	}

	/* Return if there is nothing to clean */
	if ((free_count == 0) ||
	    (clean_count <= 1)) {
		goto done;
	}

	/* Calculate a 3 percent free pad count (1 being minimum) */
	if (seg->fc_numblks > 66) {
		free_pad = ((seg->fc_numblks * 3)/100);
	} else {
		free_pad = 1;
	}

	/* Return if fc_low is below pool free pad */
	if (clean_count <= free_pad) {
		goto done;
	}

	clean_count -= free_pad;

	/* clean_count can't exceed minimum pool levels */
	if (clean_count > (seg->fc_numblks - seg->fc_lo_water)) {
		clean_count = (seg->fc_numblks - seg->fc_lo_water);
	}

	emlxs_mem_pool_free(hba, seg, clean_count);

done:
	/* Log the pool-level transition, if any */
	if (seg->fc_last != seg->fc_numblks) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
		    "%s update: n=%d->%d s=%d f=%x l=%d,%d,%d "
		    "f=%d:%d",
		    seg->fc_label, seg->fc_last, seg->fc_numblks,
		    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
		    seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
		    seg->fc_low);

		seg->fc_last = seg->fc_numblks;
	}

	mutex_exit(&EMLXS_MEMPUT_LOCK);
	mutex_exit(&EMLXS_MEMGET_LOCK);
	return;

} /* emlxs_mem_pool_clean() */
891 
892 
/*
 * Take one object from a pool.  Refills the memget list from the
 * memput list when empty, then grows a dynamic pool by fc_step if
 * still empty.  Returns NULL if the pool is disabled or exhausted.
 */
extern void *
emlxs_mem_pool_get(emlxs_hba_t *hba, MEMSEG *seg)
{
	emlxs_port_t	*port = &PPORT;
	void		*bp = NULL;
	MATCHMAP	*mp;
	uint32_t	free_count;

	mutex_enter(&EMLXS_MEMGET_LOCK);

	/* Check if memory pool is GET enabled */
	if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		mutex_exit(&EMLXS_MEMGET_LOCK);
		return (NULL);
	}

	/* If no entries on memget list, then check memput list */
	if (!seg->fc_memget_ptr) {
		mutex_enter(&EMLXS_MEMPUT_LOCK);
		if (seg->fc_memput_ptr) {
			/*
			 * Move list from memput to memget
			 */
			seg->fc_memget_ptr = seg->fc_memput_ptr;
			seg->fc_memget_end = seg->fc_memput_end;
			seg->fc_memget_cnt = seg->fc_memput_cnt;
			seg->fc_memput_ptr = NULL;
			seg->fc_memput_end = NULL;
			seg->fc_memput_cnt = 0;
		}
		mutex_exit(&EMLXS_MEMPUT_LOCK);
	}

	/* If no entries on memget list, then pool is empty */
	/* Try to allocate more if pool is dynamic */
	if (!seg->fc_memget_ptr &&
	    (seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
		(void) emlxs_mem_pool_alloc(hba, seg,  seg->fc_step);
		seg->fc_low = 0;
	}

	/* If no entries on memget list, then pool is empty */
	if (!seg->fc_memget_ptr) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_alloc_failed_msg,
		    "%s empty.", seg->fc_label);

		mutex_exit(&EMLXS_MEMGET_LOCK);
		return (NULL);
	}

	/* Remove an entry from the get list */
	bp = seg->fc_memget_ptr;

	if (seg->fc_memget_end == bp) {
		/* Last entry: the list is now empty */
		seg->fc_memget_ptr = NULL;
		seg->fc_memget_end = NULL;
		seg->fc_memget_cnt = 0;

	} else {
		/* First word of each entry links to the next */
		seg->fc_memget_ptr = *((uint8_t **)bp);
		seg->fc_memget_cnt--;
	}

	/* Initialize buffer */
	if (!(seg->fc_memflag & FC_MBUF_DMA)) {
		bzero(bp, seg->fc_memsize);
	} else {
		/* DMA buffers keep their mapping; reset only the header */
		mp = (MATCHMAP *)bp;
		mp->fc_mptr = NULL;
		mp->flag |= MAP_POOL_ALLOCATED;
	}

	/* Set fc_low if pool is dynamic */
	/* (fc_low tracks the minimum free level between cleanings) */
	if (seg->fc_memflag & FC_MEMSEG_DYNAMIC) {
		free_count = (seg->fc_memget_cnt + seg->fc_memput_cnt);
		if (free_count < seg->fc_low) {
			seg->fc_low = free_count;
		}
	}

	mutex_exit(&EMLXS_MEMGET_LOCK);

	return (bp);

} /* emlxs_mem_pool_get() */
978 
979 
/*
 * Return an object to its pool by appending it to the memput list.
 * Rejects double frees and, for DMA pools, objects that do not belong
 * to this segment (the latter shuts the adapter down).
 */
extern void
emlxs_mem_pool_put(emlxs_hba_t *hba, MEMSEG *seg, void *bp)
{
	emlxs_port_t	*port = &PPORT;
	MATCHMAP	*mp;

	/* Free the pool object */
	mutex_enter(&EMLXS_MEMPUT_LOCK);

	/* Check if memory pool is PUT enabled */
	if (!(seg->fc_memflag & FC_MEMSEG_PUT_ENABLED)) {
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return;
	}

	/* Check if buffer was just freed */
	/* (a double put would corrupt the freelist links) */
	if ((seg->fc_memput_end == bp) || (seg->fc_memget_end == bp)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "%s: Freeing free object: bp=%p", seg->fc_label, bp);

		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return;
	}

	/* Validate DMA buffer */
	if (seg->fc_memflag & FC_MBUF_DMA) {
		mp = (MATCHMAP *)bp;

		/* A foreign or non-pool buffer here is fatal: */
		/* flag the error and spawn the shutdown thread */
		if (!(mp->flag & MAP_POOL_ALLOCATED) ||
		    (mp->segment != seg)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
			    "mem_pool_put: %s invalid: mp=%p " \
			    "tag=0x%x flag=%x", seg->fc_label,
			    mp, mp->tag, mp->flag);

			EMLXS_STATE_CHANGE(hba, FC_ERROR);

			mutex_exit(&EMLXS_MEMPUT_LOCK);

			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
			    NULL, NULL);

			return;
		}
	}

	/* Release buffer to the end of the memput list */
	if (seg->fc_memput_end == NULL) {
		seg->fc_memput_ptr = bp;
		seg->fc_memput_cnt = 1;
	} else {
		*((void **)(seg->fc_memput_end)) = bp;
		seg->fc_memput_cnt++;
	}
	seg->fc_memput_end = bp;
	*((void **)(bp)) = NULL;	/* first word of bp is the link */

	mutex_exit(&EMLXS_MEMPUT_LOCK);

	/* This is for late PUT's after an initial */
	/* emlxs_mem_pool_destroy call */
	if ((seg->fc_memflag & FC_MEMSEG_PUT_ENABLED) &&
	    !(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		emlxs_mem_pool_destroy(hba, seg);
	}

	return;

} /* emlxs_mem_pool_put() */
1049 
1050 
1051 extern MATCHMAP *
emlxs_mem_buf_alloc(emlxs_hba_t * hba,uint32_t size)1052 emlxs_mem_buf_alloc(emlxs_hba_t *hba, uint32_t size)
1053 {
1054 	emlxs_port_t *port = &PPORT;
1055 	uint8_t *bp = NULL;
1056 	MATCHMAP *mp = NULL;
1057 	MBUF_INFO *buf_info;
1058 	MBUF_INFO bufinfo;
1059 
1060 	buf_info = &bufinfo;
1061 
1062 	bzero(buf_info, sizeof (MBUF_INFO));
1063 	buf_info->size = sizeof (MATCHMAP);
1064 	buf_info->align = sizeof (void *);
1065 
1066 	(void) emlxs_mem_alloc(hba, buf_info);
1067 	if (buf_info->virt == NULL) {
1068 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
1069 		    "MEM_BUF_ALLOC buffer.");
1070 
1071 		return (NULL);
1072 	}
1073 
1074 	mp = (MATCHMAP *)buf_info->virt;
1075 	bzero(mp, sizeof (MATCHMAP));
1076 
1077 	bzero(buf_info, sizeof (MBUF_INFO));
1078 	buf_info->size = size;
1079 	buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG;
1080 	buf_info->align = 32;
1081 
1082 	(void) emlxs_mem_alloc(hba, buf_info);
1083 	if (buf_info->virt == NULL) {
1084 
1085 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
1086 		    "MEM_BUF_ALLOC DMA buffer.");
1087 
1088 		/* Free the mp object */
1089 		bzero(buf_info, sizeof (MBUF_INFO));
1090 		buf_info->size = sizeof (MATCHMAP);
1091 		buf_info->virt = (void *)mp;
1092 		emlxs_mem_free(hba, buf_info);
1093 
1094 		return (NULL);
1095 	}
1096 	bp = (uint8_t *)buf_info->virt;
1097 	bzero(bp, buf_info->size);
1098 
1099 	mp->virt = buf_info->virt;
1100 	mp->phys = buf_info->phys;
1101 	mp->size = buf_info->size;
1102 	mp->dma_handle = buf_info->dma_handle;
1103 	mp->data_handle = buf_info->data_handle;
1104 	mp->tag = MEM_BUF;
1105 	mp->flag |= MAP_BUF_ALLOCATED;
1106 
1107 	return (mp);
1108 
1109 } /* emlxs_mem_buf_alloc() */
1110 
1111 
1112 extern void
emlxs_mem_buf_free(emlxs_hba_t * hba,MATCHMAP * mp)1113 emlxs_mem_buf_free(emlxs_hba_t *hba, MATCHMAP *mp)
1114 {
1115 	MBUF_INFO bufinfo;
1116 	MBUF_INFO *buf_info;
1117 
1118 	buf_info = &bufinfo;
1119 
1120 	if (!(mp->flag & MAP_BUF_ALLOCATED)) {
1121 		return;
1122 	}
1123 
1124 	bzero(buf_info, sizeof (MBUF_INFO));
1125 	buf_info->size = mp->size;
1126 	buf_info->virt = mp->virt;
1127 	buf_info->phys = mp->phys;
1128 	buf_info->dma_handle = mp->dma_handle;
1129 	buf_info->data_handle = mp->data_handle;
1130 	buf_info->flags = FC_MBUF_DMA;
1131 	emlxs_mem_free(hba, buf_info);
1132 
1133 	bzero(buf_info, sizeof (MBUF_INFO));
1134 	buf_info->size = sizeof (MATCHMAP);
1135 	buf_info->virt = (void *)mp;
1136 	emlxs_mem_free(hba, buf_info);
1137 
1138 	return;
1139 
1140 } /* emlxs_mem_buf_free() */
1141 
1142 
1143 extern void *
emlxs_mem_get(emlxs_hba_t * hba,uint32_t seg_id)1144 emlxs_mem_get(emlxs_hba_t *hba, uint32_t seg_id)
1145 {
1146 	emlxs_port_t	*port = &PPORT;
1147 	void		*bp;
1148 	MAILBOXQ	*mbq;
1149 	IOCBQ		*iocbq;
1150 	NODELIST	*node;
1151 	MEMSEG		*seg;
1152 
1153 	if (seg_id >= FC_MAX_SEG) {
1154 
1155 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1156 		    "mem_get: Invalid segment id = %d",
1157 		    seg_id);
1158 
1159 		return (NULL);
1160 	}
1161 	seg = &hba->memseg[seg_id];
1162 
1163 	/* Alloc a buffer from the pool */
1164 	bp = emlxs_mem_pool_get(hba, seg);
1165 
1166 	if (bp) {
1167 		switch (seg_id) {
1168 		case MEM_MBOX:
1169 			mbq = (MAILBOXQ *)bp;
1170 			mbq->flag |= MBQ_POOL_ALLOCATED;
1171 			break;
1172 
1173 		case MEM_IOCB:
1174 			iocbq = (IOCBQ *)bp;
1175 			iocbq->flag |= IOCB_POOL_ALLOCATED;
1176 			break;
1177 
1178 		case MEM_NLP:
1179 			node = (NODELIST *)bp;
1180 			node->flag |= NODE_POOL_ALLOCATED;
1181 			break;
1182 		}
1183 	}
1184 
1185 	return (bp);
1186 
1187 } /* emlxs_mem_get() */
1188 
1189 
1190 extern void
emlxs_mem_put(emlxs_hba_t * hba,uint32_t seg_id,void * bp)1191 emlxs_mem_put(emlxs_hba_t *hba, uint32_t seg_id, void *bp)
1192 {
1193 	emlxs_port_t	*port = &PPORT;
1194 	MAILBOXQ	*mbq;
1195 	IOCBQ		*iocbq;
1196 	NODELIST	*node;
1197 	MEMSEG		*seg;
1198 	MATCHMAP	*mp;
1199 
1200 	if (seg_id >= FC_MAX_SEG) {
1201 
1202 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1203 		    "mem_put: Invalid segment id = %d: bp=%p",
1204 		    seg_id, bp);
1205 
1206 		return;
1207 	}
1208 	seg = &hba->memseg[seg_id];
1209 
1210 	/* Verify buffer */
1211 	switch (seg_id) {
1212 	case MEM_MBOX:
1213 		mbq = (MAILBOXQ *)bp;
1214 
1215 		if (!(mbq->flag & MBQ_POOL_ALLOCATED)) {
1216 			return;
1217 		}
1218 		break;
1219 
1220 	case MEM_IOCB:
1221 		iocbq = (IOCBQ *)bp;
1222 
1223 		if (!(iocbq->flag & IOCB_POOL_ALLOCATED)) {
1224 			return;
1225 		}
1226 
1227 		/* Any IOCBQ with a packet attached did not come */
1228 		/* from our pool */
1229 		if (iocbq->sbp) {
1230 			return;
1231 		}
1232 		break;
1233 
1234 	case MEM_NLP:
1235 		node = (NODELIST *)bp;
1236 
1237 		if (!(node->flag & NODE_POOL_ALLOCATED)) {
1238 			return;
1239 		}
1240 		break;
1241 
1242 	default:
1243 		mp = (MATCHMAP *)bp;
1244 
1245 		if (mp->flag & MAP_BUF_ALLOCATED) {
1246 			emlxs_mem_buf_free(hba, mp);
1247 			return;
1248 		}
1249 
1250 		if (mp->flag & MAP_TABLE_ALLOCATED) {
1251 			return;
1252 		}
1253 
1254 		if (!(mp->flag & MAP_POOL_ALLOCATED)) {
1255 			return;
1256 		}
1257 		break;
1258 	}
1259 
1260 	/* Free a buffer to the pool */
1261 	emlxs_mem_pool_put(hba, seg, bp);
1262 
1263 	return;
1264 
1265 } /* emlxs_mem_put() */
1266 
1267 
1268 /*
1269  * Look up the virtual address given a mapped address
1270  */
1271 /* SLI3 */
1272 extern MATCHMAP *
emlxs_mem_get_vaddr(emlxs_hba_t * hba,RING * rp,uint64_t mapbp)1273 emlxs_mem_get_vaddr(emlxs_hba_t *hba, RING *rp, uint64_t mapbp)
1274 {
1275 	emlxs_port_t *port = &PPORT;
1276 	MATCHMAP *prev;
1277 	MATCHMAP *mp;
1278 
1279 	if (rp->ringno == hba->channel_els) {
1280 		mp = (MATCHMAP *)rp->fc_mpoff;
1281 		prev = 0;
1282 
1283 		while (mp) {
1284 			if (mp->phys == mapbp) {
1285 				if (prev == 0) {
1286 					rp->fc_mpoff = mp->fc_mptr;
1287 				} else {
1288 					prev->fc_mptr = mp->fc_mptr;
1289 				}
1290 
1291 				if (rp->fc_mpon == mp) {
1292 					rp->fc_mpon = (void *)prev;
1293 				}
1294 
1295 				mp->fc_mptr = NULL;
1296 
1297 				EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1298 				    DDI_DMA_SYNC_FORKERNEL);
1299 
1300 				HBASTATS.ElsUbPosted--;
1301 
1302 				return (mp);
1303 			}
1304 
1305 			prev = mp;
1306 			mp = (MATCHMAP *)mp->fc_mptr;
1307 		}
1308 
1309 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1310 		    "ELS Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1311 		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1312 
1313 	} else if (rp->ringno == hba->channel_ct) {
1314 
1315 		mp = (MATCHMAP *)rp->fc_mpoff;
1316 		prev = 0;
1317 
1318 		while (mp) {
1319 			if (mp->phys == mapbp) {
1320 				if (prev == 0) {
1321 					rp->fc_mpoff = mp->fc_mptr;
1322 				} else {
1323 					prev->fc_mptr = mp->fc_mptr;
1324 				}
1325 
1326 				if (rp->fc_mpon == mp) {
1327 					rp->fc_mpon = (void *)prev;
1328 				}
1329 
1330 				mp->fc_mptr = NULL;
1331 
1332 				EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1333 				    DDI_DMA_SYNC_FORKERNEL);
1334 
1335 				HBASTATS.CtUbPosted--;
1336 
1337 				return (mp);
1338 			}
1339 
1340 			prev = mp;
1341 			mp = (MATCHMAP *)mp->fc_mptr;
1342 		}
1343 
1344 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1345 		    "CT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1346 		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1347 
1348 	} else if (rp->ringno == hba->channel_ip) {
1349 
1350 		mp = (MATCHMAP *)rp->fc_mpoff;
1351 		prev = 0;
1352 
1353 		while (mp) {
1354 			if (mp->phys == mapbp) {
1355 				if (prev == 0) {
1356 					rp->fc_mpoff = mp->fc_mptr;
1357 				} else {
1358 					prev->fc_mptr = mp->fc_mptr;
1359 				}
1360 
1361 				if (rp->fc_mpon == mp) {
1362 					rp->fc_mpon = (void *)prev;
1363 				}
1364 
1365 				mp->fc_mptr = NULL;
1366 
1367 				EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1368 				    DDI_DMA_SYNC_FORKERNEL);
1369 
1370 				HBASTATS.IpUbPosted--;
1371 
1372 				return (mp);
1373 			}
1374 
1375 			prev = mp;
1376 			mp = (MATCHMAP *)mp->fc_mptr;
1377 		}
1378 
1379 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1380 		    "IP Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1381 		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1382 
1383 #ifdef SFCT_SUPPORT
1384 	} else if (rp->ringno == hba->CHANNEL_FCT) {
1385 		mp = (MATCHMAP *)rp->fc_mpoff;
1386 		prev = 0;
1387 
1388 		while (mp) {
1389 			if (mp->phys == mapbp) {
1390 				if (prev == 0) {
1391 					rp->fc_mpoff = mp->fc_mptr;
1392 				} else {
1393 					prev->fc_mptr = mp->fc_mptr;
1394 				}
1395 
1396 				if (rp->fc_mpon == mp) {
1397 					rp->fc_mpon = (void *)prev;
1398 				}
1399 
1400 				mp->fc_mptr = NULL;
1401 
1402 				EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1403 				    DDI_DMA_SYNC_FORKERNEL);
1404 
1405 				HBASTATS.FctUbPosted--;
1406 
1407 				return (mp);
1408 			}
1409 
1410 			prev = mp;
1411 			mp = (MATCHMAP *)mp->fc_mptr;
1412 		}
1413 
1414 		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1415 		    "FCT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1416 		    mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1417 
1418 #endif /* SFCT_SUPPORT */
1419 	}
1420 
1421 	return (0);
1422 
1423 } /* emlxs_mem_get_vaddr() */
1424 
1425 
1426 /*
1427  * Given a virtual address bp, generate the physical mapped address and
1428  * place it where addr points to. Save the address pair for lookup later.
1429  */
1430 /* SLI3 */
1431 extern void
emlxs_mem_map_vaddr(emlxs_hba_t * hba,RING * rp,MATCHMAP * mp,uint32_t * haddr,uint32_t * laddr)1432 emlxs_mem_map_vaddr(emlxs_hba_t *hba, RING *rp, MATCHMAP *mp,
1433     uint32_t *haddr, uint32_t *laddr)
1434 {
1435 	if (rp->ringno == hba->channel_els) {
1436 		/*
1437 		 * Update slot fc_mpon points to then bump it
1438 		 * fc_mpoff is pointer head of the list.
1439 		 * fc_mpon is pointer tail of the list.
1440 		 */
1441 		mp->fc_mptr = NULL;
1442 		if (rp->fc_mpoff == 0) {
1443 			rp->fc_mpoff = (void *)mp;
1444 			rp->fc_mpon = (void *)mp;
1445 		} else {
1446 			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1447 			    (void *)mp;
1448 			rp->fc_mpon = (void *)mp;
1449 		}
1450 
1451 		if (hba->flag & FC_SLIM2_MODE) {
1452 
1453 			/* return mapped address */
1454 			*haddr = PADDR_HI(mp->phys);
1455 			/* return mapped address */
1456 			*laddr = PADDR_LO(mp->phys);
1457 		} else {
1458 			/* return mapped address */
1459 			*laddr = PADDR_LO(mp->phys);
1460 		}
1461 
1462 		HBASTATS.ElsUbPosted++;
1463 
1464 	} else if (rp->ringno == hba->channel_ct) {
1465 		/*
1466 		 * Update slot fc_mpon points to then bump it
1467 		 * fc_mpoff is pointer head of the list.
1468 		 * fc_mpon is pointer tail of the list.
1469 		 */
1470 		mp->fc_mptr = NULL;
1471 		if (rp->fc_mpoff == 0) {
1472 			rp->fc_mpoff = (void *)mp;
1473 			rp->fc_mpon = (void *)mp;
1474 		} else {
1475 			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1476 			    (void *)mp;
1477 			rp->fc_mpon = (void *)mp;
1478 		}
1479 
1480 		if (hba->flag & FC_SLIM2_MODE) {
1481 			/* return mapped address */
1482 			*haddr = PADDR_HI(mp->phys);
1483 			/* return mapped address */
1484 			*laddr = PADDR_LO(mp->phys);
1485 		} else {
1486 			/* return mapped address */
1487 			*laddr = PADDR_LO(mp->phys);
1488 		}
1489 
1490 		HBASTATS.CtUbPosted++;
1491 
1492 
1493 	} else if (rp->ringno == hba->channel_ip) {
1494 		/*
1495 		 * Update slot fc_mpon points to then bump it
1496 		 * fc_mpoff is pointer head of the list.
1497 		 * fc_mpon is pointer tail of the list.
1498 		 */
1499 		mp->fc_mptr = NULL;
1500 		if (rp->fc_mpoff == 0) {
1501 			rp->fc_mpoff = (void *)mp;
1502 			rp->fc_mpon = (void *)mp;
1503 		} else {
1504 			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1505 			    (void *)mp;
1506 			rp->fc_mpon = (void *)mp;
1507 		}
1508 
1509 		if (hba->flag & FC_SLIM2_MODE) {
1510 			/* return mapped address */
1511 			*haddr = PADDR_HI(mp->phys);
1512 			*laddr = PADDR_LO(mp->phys);
1513 		} else {
1514 			*laddr = PADDR_LO(mp->phys);
1515 		}
1516 
1517 		HBASTATS.IpUbPosted++;
1518 
1519 
1520 #ifdef SFCT_SUPPORT
1521 	} else if (rp->ringno == hba->CHANNEL_FCT) {
1522 		/*
1523 		 * Update slot fc_mpon points to then bump it
1524 		 * fc_mpoff is pointer head of the list.
1525 		 * fc_mpon is pointer tail of the list.
1526 		 */
1527 		mp->fc_mptr = NULL;
1528 		if (rp->fc_mpoff == 0) {
1529 			rp->fc_mpoff = (void *)mp;
1530 			rp->fc_mpon = (void *)mp;
1531 		} else {
1532 			((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1533 			    (void *)mp;
1534 			rp->fc_mpon = (void *)mp;
1535 		}
1536 
1537 		if (hba->flag & FC_SLIM2_MODE) {
1538 			/* return mapped address */
1539 			*haddr = PADDR_HI(mp->phys);
1540 			/* return mapped address */
1541 			*laddr = PADDR_LO(mp->phys);
1542 		} else {
1543 			/* return mapped address */
1544 			*laddr = PADDR_LO(mp->phys);
1545 		}
1546 
1547 		HBASTATS.FctUbPosted++;
1548 
1549 #endif /* SFCT_SUPPORT */
1550 	}
1551 } /* emlxs_mem_map_vaddr() */
1552 
1553 
1554 /* SLI3 */
1555 uint32_t
emlxs_hbq_alloc(emlxs_hba_t * hba,uint32_t hbq_id)1556 emlxs_hbq_alloc(emlxs_hba_t *hba, uint32_t hbq_id)
1557 {
1558 	emlxs_port_t *port = &PPORT;
1559 	HBQ_INIT_t *hbq;
1560 	MBUF_INFO *buf_info;
1561 	MBUF_INFO bufinfo;
1562 
1563 	hbq = &hba->sli.sli3.hbq_table[hbq_id];
1564 
1565 	if (hbq->HBQ_host_buf.virt == 0) {
1566 		buf_info = &bufinfo;
1567 
1568 		/* Get the system's page size in a DDI-compliant way. */
1569 		bzero(buf_info, sizeof (MBUF_INFO));
1570 		buf_info->size = hbq->HBQ_numEntries * sizeof (HBQE_t);
1571 		buf_info->flags = FC_MBUF_DMA;
1572 		buf_info->align = 4096;
1573 
1574 		(void) emlxs_mem_alloc(hba, buf_info);
1575 
1576 		if (buf_info->virt == NULL) {
1577 			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
1578 			    "Unable to alloc HBQ.");
1579 			return (ENOMEM);
1580 		}
1581 
1582 		hbq->HBQ_host_buf.virt = buf_info->virt;
1583 		hbq->HBQ_host_buf.phys = buf_info->phys;
1584 		hbq->HBQ_host_buf.data_handle = buf_info->data_handle;
1585 		hbq->HBQ_host_buf.dma_handle = buf_info->dma_handle;
1586 		hbq->HBQ_host_buf.size = buf_info->size;
1587 		hbq->HBQ_host_buf.tag = hbq_id;
1588 
1589 		bzero((char *)hbq->HBQ_host_buf.virt, buf_info->size);
1590 	}
1591 
1592 	return (0);
1593 
1594 } /* emlxs_hbq_alloc() */
1595