emlxs_mem.c (8f23e9fa)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at
9 * http://www.opensource.org/licenses/cddl1.txt.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22/*
23 * Copyright (c) 2004-2011 Emulex. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27#include <emlxs.h>
28
29/* #define EMLXS_POOL_DEBUG */
30
31EMLXS_MSG_DEF(EMLXS_MEM_C);
32
33
34static uint32_t emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg,
35 uint32_t count);
36static void emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count);
37
38
39extern int32_t
40emlxs_mem_alloc_buffer(emlxs_hba_t *hba)
41{
42 emlxs_port_t *port = &PPORT;
43 emlxs_config_t *cfg;
44 MBUF_INFO *buf_info;
45 MEMSEG *seg;
46 MBUF_INFO bufinfo;
47 int32_t i;
48 MATCHMAP *mp;
49 MATCHMAP **bpl_table;
50
51 buf_info = &bufinfo;
52 cfg = &CFG;
53
54 bzero(hba->memseg, sizeof (hba->memseg));
55
56 /* Allocate the fc_table */
57 bzero(buf_info, sizeof (MBUF_INFO));
58 buf_info->size = (hba->max_iotag * sizeof (emlxs_buf_t *));
59
60 (void) emlxs_mem_alloc(hba, buf_info);
61 if (buf_info->virt == NULL) {
62
63 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
64 "fc_table buffer.");
65
66 goto failed;
67 }
68 hba->fc_table = buf_info->virt;
69 bzero(hba->fc_table, buf_info->size);
70
71 /* Prepare the memory pools */
72 for (i = 0; i < FC_MAX_SEG; i++) {
73 seg = &hba->memseg[i];
74
75 switch (i) {
76 case MEM_NLP:
77 (void) strlcpy(seg->fc_label, "Node Pool",
78 sizeof (seg->fc_label));
79 seg->fc_memtag = MEM_NLP;
80 seg->fc_memsize = sizeof (NODELIST);
81 seg->fc_hi_water = hba->max_nodes + 2;
82 seg->fc_lo_water = 2;
83 seg->fc_step = 1;
84 break;
85
86 case MEM_IOCB:
87 (void) strlcpy(seg->fc_label, "IOCB Pool",
88 sizeof (seg->fc_label));
89 seg->fc_memtag = MEM_IOCB;
90 seg->fc_memsize = sizeof (IOCBQ);
91 seg->fc_hi_water = cfg[CFG_NUM_IOCBS].current;
92 seg->fc_lo_water = cfg[CFG_NUM_IOCBS].low;
93 seg->fc_step = cfg[CFG_NUM_IOCBS].low;
94 break;
95
96 case MEM_MBOX:
97 (void) strlcpy(seg->fc_label, "MBOX Pool",
98 sizeof (seg->fc_label));
99 seg->fc_memtag = MEM_MBOX;
100 seg->fc_memsize = sizeof (MAILBOXQ);
101 seg->fc_hi_water = hba->max_nodes + 32;
102 seg->fc_lo_water = 32;
103 seg->fc_step = 1;
104 break;
105
106 case MEM_BPL:
107 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
108 continue;
109 }
110 (void) strlcpy(seg->fc_label, "BPL Pool",
111 sizeof (seg->fc_label));
112 seg->fc_memtag = MEM_BPL;
113 seg->fc_memsize = hba->sli.sli3.mem_bpl_size;
114 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
115 seg->fc_memalign = 32;
116 seg->fc_hi_water = hba->max_iotag;
117 seg->fc_lo_water = cfg[CFG_NUM_IOCBS].low;
118 seg->fc_step = cfg[CFG_NUM_IOCBS].low;
119 break;
120
121 case MEM_BUF:
122 /* These are the unsolicited ELS buffers. */
123 (void) strlcpy(seg->fc_label, "BUF Pool",
124 sizeof (seg->fc_label));
125 seg->fc_memtag = MEM_BUF;
126 seg->fc_memsize = MEM_BUF_SIZE;
127 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
128 seg->fc_memalign = 32;
129 seg->fc_hi_water = MEM_ELSBUF_COUNT + MEM_BUF_COUNT;
130 seg->fc_lo_water = MEM_ELSBUF_COUNT;
131 seg->fc_step = 1;
132 break;
133
134 case MEM_IPBUF:
135 /* These are the unsolicited IP buffers. */
136 if (cfg[CFG_NETWORK_ON].current == 0) {
137 continue;
138 }
139
140 (void) strlcpy(seg->fc_label, "IPBUF Pool",
141 sizeof (seg->fc_label));
142 seg->fc_memtag = MEM_IPBUF;
143 seg->fc_memsize = MEM_IPBUF_SIZE;
144 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
145 seg->fc_memalign = 32;
146 seg->fc_hi_water = MEM_IPBUF_COUNT;
147 seg->fc_lo_water = 0;
148 seg->fc_step = 4;
149 break;
150
151 case MEM_CTBUF:
152 /* These are the unsolicited CT buffers. */
153 (void) strlcpy(seg->fc_label, "CTBUF Pool",
154 sizeof (seg->fc_label));
155 seg->fc_memtag = MEM_CTBUF;
156 seg->fc_memsize = MEM_CTBUF_SIZE;
157 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
158 seg->fc_memalign = 32;
159 seg->fc_hi_water = MEM_CTBUF_COUNT;
160 seg->fc_lo_water = MEM_CTBUF_COUNT;
161 seg->fc_step = 1;
162 break;
163
164#ifdef SFCT_SUPPORT
165 case MEM_FCTBUF:
166 /* These are the unsolicited FCT buffers. */
167 if (!(port->flag & EMLXS_TGT_ENABLED)) {
168 continue;
169 }
170
171 (void) strlcpy(seg->fc_label, "FCTBUF Pool",
172 sizeof (seg->fc_label));
173 seg->fc_memtag = MEM_FCTBUF;
174 seg->fc_memsize = MEM_FCTBUF_SIZE;
175 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
176 seg->fc_memalign = 32;
177 seg->fc_hi_water = MEM_FCTBUF_COUNT;
178 seg->fc_lo_water = 0;
179 seg->fc_step = 8;
180 break;
181#endif /* SFCT_SUPPORT */
182
183 default:
184 continue;
185 }
186
187 if (seg->fc_memsize == 0) {
188 continue;
189 }
190
191 (void) emlxs_mem_pool_create(hba, seg);
192
193 if (seg->fc_numblks < seg->fc_lo_water) {
194 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
195 "%s: count=%d size=%d flags=%x lo=%d hi=%d",
196 seg->fc_label, seg->fc_numblks,
197 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
198 seg->fc_hi_water);
199
200 goto failed;
201 }
202 }
203
204 hba->sli.sli3.bpl_table = NULL;
205 seg = &hba->memseg[MEM_BPL];
206
207 /* If SLI3 and MEM_BPL pool is static */
208 if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK) &&
209 !(seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
210 /*
211 * Allocate and Initialize bpl_table
212 * This is for increased performance.
213 */
214 bzero(buf_info, sizeof (MBUF_INFO));
215 buf_info->size = hba->max_iotag * sizeof (MATCHMAP *);
216
217 (void) emlxs_mem_alloc(hba, buf_info);
218 if (buf_info->virt == NULL) {
219
220 EMLXS_MSGF(EMLXS_CONTEXT,
221 &emlxs_mem_alloc_failed_msg,
222 "BPL table buffer.");
223
224 goto failed;
225 }
226 hba->sli.sli3.bpl_table = buf_info->virt;
227
228 bpl_table = (MATCHMAP**)hba->sli.sli3.bpl_table;
229 for (i = 0; i < hba->max_iotag; i++) {
230 mp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL);
231 mp->flag |= MAP_TABLE_ALLOCATED;
232 bpl_table[i] = mp;
233 }
234 }
235
236 return (1);
237
238failed:
239
240 (void) emlxs_mem_free_buffer(hba);
241 return (0);

--- 5 unchanged lines hidden ---

247 * emlxs_mem_free_buffer
248 *
249 * This routine will free iocb/data buffer space
250 * and TGTM resource.
251 */
252extern int
253emlxs_mem_free_buffer(emlxs_hba_t *hba)
254{
255 emlxs_port_t *port = &PPORT;
256 emlxs_port_t *vport;
257 int32_t j;
258 MATCHMAP *mp;
259 CHANNEL *cp;
260 RING *rp;
261 MBUF_INFO *buf_info;
262 MBUF_INFO bufinfo;
263 MATCHMAP **bpl_table;
264
265 buf_info = &bufinfo;
266
267 for (j = 0; j < hba->chan_count; j++) {
268 cp = &hba->chan[j];
269
270 /* Flush the ring */
271 (void) emlxs_tx_channel_flush(hba, cp, 0);

--- 42 unchanged lines hidden ---

314 }
315 }
316
317 if (hba->flag & FC_HBQ_ENABLED) {
318 emlxs_hbq_free_all(hba, EMLXS_ELS_HBQ_ID);
319 emlxs_hbq_free_all(hba, EMLXS_IP_HBQ_ID);
320 emlxs_hbq_free_all(hba, EMLXS_CT_HBQ_ID);
321
322 if (port->flag & EMLXS_TGT_ENABLED) {
323 emlxs_hbq_free_all(hba, EMLXS_FCT_HBQ_ID);
324 }
325 }
326
327 /* Free the nodes */
328 for (j = 0; j < MAX_VPORTS; j++) {
329 vport = &VPORT(j);
330 if (vport->node_count) {
331 emlxs_node_destroy_all(vport);
332 }
333 }
334
335 /* Make sure the mailbox queue is empty */
336 emlxs_mb_flush(hba);
337
338 if (hba->fc_table) {
339 bzero(buf_info, sizeof (MBUF_INFO));
340 buf_info->size = hba->max_iotag * sizeof (emlxs_buf_t *);
341 buf_info->virt = hba->fc_table;
342 emlxs_mem_free(hba, buf_info);
343 hba->fc_table = NULL;
344 }
345
346 if (hba->sli.sli3.bpl_table) {
347 /* Return MEM_BPLs to their pool */
348 bpl_table = (MATCHMAP**)hba->sli.sli3.bpl_table;
349 for (j = 0; j < hba->max_iotag; j++) {
350 mp = bpl_table[j];
351 mp->flag &= ~MAP_TABLE_ALLOCATED;
352 emlxs_mem_put(hba, MEM_BPL, (void*)mp);
353 }
354
355 bzero(buf_info, sizeof (MBUF_INFO));
356 buf_info->size = hba->max_iotag * sizeof (MATCHMAP *);
357 buf_info->virt = hba->sli.sli3.bpl_table;
358 emlxs_mem_free(hba, buf_info);
359 hba->sli.sli3.bpl_table = NULL;
360 }
361
362 /* Free the memory segments */
363 for (j = 0; j < FC_MAX_SEG; j++) {
364 emlxs_mem_pool_destroy(hba, &hba->memseg[j]);
365 }
366
367 return (0);
368
369} /* emlxs_mem_free_buffer() */
370
371
372/* Must hold EMLXS_MEMGET_LOCK when calling */
373static uint32_t
374emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count)
375{
376 emlxs_port_t *port = &PPORT;
377 uint8_t *bp = NULL;
378 MATCHMAP *mp = NULL;
379 MBUF_INFO *buf_info;
380 MBUF_INFO local_buf_info;
381 uint32_t i;
382 uint32_t fc_numblks;
383
384 if (seg->fc_memsize == 0) {
385 return (0);
386 }
387
388 if (seg->fc_numblks >= seg->fc_hi_water) {
389 return (0);
390 }
391
392 if (count == 0) {
393 return (0);
394 }
395
396 if (count > (seg->fc_hi_water - seg->fc_numblks)) {
397 count = (seg->fc_hi_water - seg->fc_numblks);
398 }
399
400 buf_info = &local_buf_info;
401 fc_numblks = seg->fc_numblks;
402
403 /* Check for initial allocation */
404 if (!(seg->fc_memflag & FC_MEMSEG_PUT_ENABLED)) {
405 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
406 "%s alloc:%d n=%d s=%d f=%x l=%d,%d,%d "
407 "f=%d:%d",
408 seg->fc_label, count, seg->fc_numblks,
409 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
410 seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
411 seg->fc_low);
412 }
413
414 if (!(seg->fc_memflag & FC_MBUF_DMA)) {
415 goto vmem_pool;
416 }
417
418/* dma_pool */
419
420 for (i = 0; i < count; i++) {
421 bzero(buf_info, sizeof (MBUF_INFO));
422 buf_info->size = sizeof (MATCHMAP);
423 buf_info->align = sizeof (void *);
424
425 (void) emlxs_mem_alloc(hba, buf_info);
426 if (buf_info->virt == NULL) {
427 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
428 "%s: count=%d size=%d",
429 seg->fc_label, seg->fc_numblks, seg->fc_memsize);
430
431 goto done;
432 }
433
434 mp = (MATCHMAP *)buf_info->virt;
435 bzero(mp, sizeof (MATCHMAP));
436
437 bzero(buf_info, sizeof (MBUF_INFO));
438 buf_info->size = seg->fc_memsize;
439 buf_info->flags = seg->fc_memflag;
440 buf_info->align = seg->fc_memalign;
441
442 (void) emlxs_mem_alloc(hba, buf_info);
443 if (buf_info->virt == NULL) {
444 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
445 "%s: count=%d size=%d",
446 seg->fc_label, seg->fc_numblks, seg->fc_memsize);
447
448 /* Free the mp object */
449 bzero(buf_info, sizeof (MBUF_INFO));
450 buf_info->size = sizeof (MATCHMAP);
451 buf_info->virt = (void *)mp;
452 emlxs_mem_free(hba, buf_info);
453
454 goto done;
455 }
456 bp = (uint8_t *)buf_info->virt;
457 bzero(bp, seg->fc_memsize);
458
459 mp->virt = buf_info->virt;
460 mp->phys = buf_info->phys;
461 mp->size = buf_info->size;
462 mp->dma_handle = buf_info->dma_handle;
463 mp->data_handle = buf_info->data_handle;
464 mp->tag = seg->fc_memtag;
465 mp->segment = seg;
466 mp->flag |= MAP_POOL_ALLOCATED;
467
468#ifdef SFCT_SUPPORT
469 if (mp->tag >= MEM_FCTSEG) {
470 if (emlxs_fct_stmf_alloc(hba, mp)) {
471 /* Free the DMA memory itself */
472 emlxs_mem_free(hba, buf_info);
473
474 /* Free the mp object */
475 bzero(buf_info, sizeof (MBUF_INFO));
476 buf_info->size = sizeof (MATCHMAP);
477 buf_info->virt = (void *)mp;
478 emlxs_mem_free(hba, buf_info);
479
480 goto done;
481 }
482 }
483#endif /* SFCT_SUPPORT */
484
454 /* Add the buffer desc to the tail of the pool freelist */
455 if (seg->fc_memget_end == NULL) {
456 seg->fc_memget_ptr = (uint8_t *)mp;
457 seg->fc_memget_cnt = 1;
458 } else {
459 *((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)mp;
460 seg->fc_memget_cnt++;
461 }
462 seg->fc_memget_end = (uint8_t *)mp;
485 /* Add the buffer desc to the tail of the pool freelist */
486 if (seg->fc_memget_end == NULL) {
487 seg->fc_memget_ptr = (uint8_t *)mp;
488 seg->fc_memget_cnt = 1;
489 } else {
490 *((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)mp;
491 seg->fc_memget_cnt++;
492 }
493 seg->fc_memget_end = (uint8_t *)mp;
494
495 seg->fc_numblks++;
496 seg->fc_total_memsize += (seg->fc_memsize + sizeof (MATCHMAP));
463 }
464
497 }
498
465 mutex_exit(&EMLXS_MEMPUT_LOCK);
466 mutex_exit(&EMLXS_MEMGET_LOCK);
467 return (seg);
499 goto done;
468
469vmem_pool:
470
500
501vmem_pool:
502
471 mutex_exit(&EMLXS_MEMPUT_LOCK);
472 mutex_exit(&EMLXS_MEMGET_LOCK);
503 for (i = 0; i < count; i++) {
504 bzero(buf_info, sizeof (MBUF_INFO));
505 buf_info->size = seg->fc_memsize;
473
506
474 seg->fc_memstart_virt = kmem_zalloc(seg->fc_total_memsize, KM_SLEEP);
507 (void) emlxs_mem_alloc(hba, buf_info);
508 if (buf_info->virt == NULL) {
509 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
510 "%s: count=%d size=%d",
511 seg->fc_label, seg->fc_numblks, seg->fc_memsize);
475
512
476 mutex_enter(&EMLXS_MEMGET_LOCK);
477 mutex_enter(&EMLXS_MEMPUT_LOCK);
513 goto done;
514 }
515 bp = (uint8_t *)buf_info->virt;
478
516
479 if (seg->fc_memstart_virt == NULL) {
480 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
481 "%s base. size=%d", seg->fc_label,
482 seg->fc_total_memsize);
483
484 goto failed;
485 }
486
487 bp = (uint8_t *)seg->fc_memstart_virt;
488 for (i = 0; i < seg->fc_numblks; i++) {
489
490 /* Add the buffer to the tail of the pool freelist */
491 if (seg->fc_memget_end == NULL) {
492 seg->fc_memget_ptr = (uint8_t *)bp;
493 seg->fc_memget_cnt = 1;
494 } else {
495 *((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)bp;
496 seg->fc_memget_cnt++;
497 }
498 seg->fc_memget_end = (uint8_t *)bp;
499
517 /* Add the buffer to the tail of the pool freelist */
518 if (seg->fc_memget_end == NULL) {
519 seg->fc_memget_ptr = (uint8_t *)bp;
520 seg->fc_memget_cnt = 1;
521 } else {
522 *((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)bp;
523 seg->fc_memget_cnt++;
524 }
525 seg->fc_memget_end = (uint8_t *)bp;
526
500 bp += seg->fc_memsize;
527 seg->fc_numblks++;
528 seg->fc_total_memsize += seg->fc_memsize;
501 }
502
529 }
530
503 mutex_exit(&EMLXS_MEMPUT_LOCK);
504 mutex_exit(&EMLXS_MEMGET_LOCK);
505 return (seg);
531done:
506
532
507failed:
533 return ((seg->fc_numblks - fc_numblks));
508
534
509 mutex_exit(&EMLXS_MEMPUT_LOCK);
510 mutex_exit(&EMLXS_MEMGET_LOCK);
511 emlxs_mem_pool_free(hba, seg);
512 return (NULL);
513
514} /* emlxs_mem_pool_alloc() */
515
516
535} /* emlxs_mem_pool_alloc() */
536
537
517extern void
518emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg)
538/* Must hold EMLXS_MEMGET_LOCK & EMLXS_MEMPUT_LOCK when calling */
539static void
540emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count)
519{
520 emlxs_port_t *port = &PPORT;
521 uint8_t *bp = NULL;
522 MATCHMAP *mp = NULL;
523 MBUF_INFO *buf_info;
524 MBUF_INFO local_buf_info;
541{
542 emlxs_port_t *port = &PPORT;
543 uint8_t *bp = NULL;
544 MATCHMAP *mp = NULL;
545 MBUF_INFO *buf_info;
546 MBUF_INFO local_buf_info;
525 MEMSEG segment;
526 uint32_t free;
527
547
528 /* Save a local copy of the segment and */
529 /* destroy the original outside of locks */
530 mutex_enter(&EMLXS_MEMGET_LOCK);
531 mutex_enter(&EMLXS_MEMPUT_LOCK);
548 if ((seg->fc_memsize == 0) ||
549 (seg->fc_numblks == 0) ||
550 (count == 0)) {
551 return;
552 }
532
553
533 free = seg->fc_memget_cnt + seg->fc_memput_cnt;
534 if (free < seg->fc_numblks) {
535 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
536 "emlxs_mem_pool_free: %s not full. (%d < %d)",
537 seg->fc_label, free, seg->fc_numblks);
554 /* Check max count */
555 if (count > seg->fc_numblks) {
556 count = seg->fc_numblks;
538 }
539
557 }
558
540 bcopy(seg, &segment, sizeof (MEMSEG));
541 bzero((char *)seg, sizeof (MEMSEG));
542 seg = &segment;
559 /* Move memput list to memget list */
560 if (seg->fc_memput_ptr) {
561 if (seg->fc_memget_end == NULL) {
562 seg->fc_memget_ptr = seg->fc_memput_ptr;
563 } else {
564 *((uint8_t **)(seg->fc_memget_end)) =\
565 seg->fc_memput_ptr;
566 }
567 seg->fc_memget_end = seg->fc_memput_end;
568 seg->fc_memget_cnt += seg->fc_memput_cnt;
543
569
544 mutex_exit(&EMLXS_MEMPUT_LOCK);
545 mutex_exit(&EMLXS_MEMGET_LOCK);
570 seg->fc_memput_ptr = NULL;
571 seg->fc_memput_end = NULL;
572 seg->fc_memput_cnt = 0;
573 }
546
574
547 /* Now free the memory */
575 buf_info = &local_buf_info;
548
576
549 if (!(seg->fc_memflag & FC_MBUF_DMA)) {
550 if (seg->fc_memstart_virt) {
551 kmem_free(seg->fc_memstart_virt, seg->fc_total_memsize);
552 }
577 /* Check for final deallocation */
578 if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
579 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
580 "%s free:%d n=%d s=%d f=%x l=%d,%d,%d "
581 "f=%d:%d",
582 seg->fc_label, count, seg->fc_numblks,
583 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
584 seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
585 seg->fc_low);
586 }
587
588 if (!(seg->fc_memflag & FC_MBUF_DMA)) {
589 goto vmem_pool;
590 }
591
592dma_pool:
593
594 /* Free memory associated with all buffers on get buffer pool */
595 while (count && ((bp = seg->fc_memget_ptr) != NULL)) {
596 /* Remove buffer from list */
597 if (seg->fc_memget_end == bp) {
598 seg->fc_memget_ptr = NULL;
599 seg->fc_memget_end = NULL;
600 seg->fc_memget_cnt = 0;
601
602 } else {
603 seg->fc_memget_ptr = *((uint8_t **)bp);
604 seg->fc_memget_cnt--;
605 }
606 mp = (MATCHMAP *)bp;
607
608#ifdef SFCT_SUPPORT
609 if (mp->tag >= MEM_FCTSEG) {
610 emlxs_fct_stmf_free(hba, mp);
611 }
612#endif /* SFCT_SUPPORT */
613
614 /* Free the DMA memory itself */
615 bzero(buf_info, sizeof (MBUF_INFO));
616 buf_info->size = mp->size;
617 buf_info->virt = mp->virt;
618 buf_info->phys = mp->phys;
619 buf_info->dma_handle = mp->dma_handle;
620 buf_info->data_handle = mp->data_handle;
621 buf_info->flags = seg->fc_memflag;
622 emlxs_mem_free(hba, buf_info);
623
624 /* Free the handle */
625 bzero(buf_info, sizeof (MBUF_INFO));
626 buf_info->size = sizeof (MATCHMAP);
627 buf_info->virt = (void *)mp;
628 emlxs_mem_free(hba, buf_info);
629
630 seg->fc_numblks--;
631 seg->fc_total_memsize -= (seg->fc_memsize + sizeof (MATCHMAP));
632
633 count--;
634 }
635
636 return;
637
638vmem_pool:
639
640 /* Free memory associated with all buffers on get buffer pool */
641 while (count && ((bp = seg->fc_memget_ptr) != NULL)) {
642 /* Remove buffer from list */
643 if (seg->fc_memget_end == bp) {
644 seg->fc_memget_ptr = NULL;
645 seg->fc_memget_end = NULL;
646 seg->fc_memget_cnt = 0;
647
648 } else {
649 seg->fc_memget_ptr = *((uint8_t **)bp);
650 seg->fc_memget_cnt--;
651 }
652
653 /* Free the Virtual memory itself */
654 bzero(buf_info, sizeof (MBUF_INFO));
655 buf_info->size = seg->fc_memsize;
656 buf_info->virt = bp;
657 emlxs_mem_free(hba, buf_info);
658
659 seg->fc_numblks--;
660 seg->fc_total_memsize -= seg->fc_memsize;
661
662 count--;
663 }
664
665 return;
666
667} /* emlxs_mem_pool_free() */
668
669
670extern uint32_t
671emlxs_mem_pool_create(emlxs_hba_t *hba, MEMSEG *seg)
672{
673 emlxs_config_t *cfg = &CFG;
674
675 mutex_enter(&EMLXS_MEMGET_LOCK);
676 mutex_enter(&EMLXS_MEMPUT_LOCK);
677
678 if (seg->fc_memsize == 0) {
679 mutex_exit(&EMLXS_MEMPUT_LOCK);
680 mutex_exit(&EMLXS_MEMGET_LOCK);
681
682 return (0);
683 }
684
685 /* Sanity check hi > lo */
686 if (seg->fc_lo_water > seg->fc_hi_water) {
687 seg->fc_hi_water = seg->fc_lo_water;
688 }
689
690 /* If dynamic pools are disabled, then force pool to max level */
691 if (cfg[CFG_MEM_DYNAMIC].current == 0) {
692 seg->fc_lo_water = seg->fc_hi_water;
693 }
694
695 /* If pool is dynamic, then fc_step must be >0 */
696 /* Otherwise, fc_step must be 0 */
697 if (seg->fc_lo_water != seg->fc_hi_water) {
698 seg->fc_memflag |= FC_MEMSEG_DYNAMIC;
699
700 if (seg->fc_step == 0) {
701 seg->fc_step = 1;
702 }
703 } else {
704 seg->fc_step = 0;
705 }
706
707 seg->fc_numblks = 0;
708 seg->fc_total_memsize = 0;
709 seg->fc_low = 0;
710
711 (void) emlxs_mem_pool_alloc(hba, seg, seg->fc_lo_water);
712
713 seg->fc_memflag |= (FC_MEMSEG_PUT_ENABLED|FC_MEMSEG_GET_ENABLED);
714
715 mutex_exit(&EMLXS_MEMPUT_LOCK);
716 mutex_exit(&EMLXS_MEMGET_LOCK);
717
718 return (seg->fc_numblks);
719
720} /* emlxs_mem_pool_create() */
721
722
723extern void
724emlxs_mem_pool_destroy(emlxs_hba_t *hba, MEMSEG *seg)
725{
726 emlxs_port_t *port = &PPORT;
727
728 mutex_enter(&EMLXS_MEMGET_LOCK);
729 mutex_enter(&EMLXS_MEMPUT_LOCK);
730
731 if (seg->fc_memsize == 0) {
732 mutex_exit(&EMLXS_MEMPUT_LOCK);
733 mutex_exit(&EMLXS_MEMGET_LOCK);
734 return;
735 }
736
737 /* Leave FC_MEMSEG_PUT_ENABLED set for now */
738 seg->fc_memflag &= ~FC_MEMSEG_GET_ENABLED;
739
740 /* Try to free all objects */
741 emlxs_mem_pool_free(hba, seg, seg->fc_numblks);
742
743 if (seg->fc_numblks) {
744 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
745 "mem_pool_destroy: %s leak detected: "
746 "%d objects still allocated.",
747 seg->fc_label, seg->fc_numblks);
748 } else {
749 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
750 "mem_pool_destroy: %s destroyed.",
751 seg->fc_label);
752
753 /* Clear all */
754 bzero(seg, sizeof (MEMSEG));
755 }
756
757 mutex_exit(&EMLXS_MEMPUT_LOCK);
758 mutex_exit(&EMLXS_MEMGET_LOCK);
759
760 return;
761
762} /* emlxs_mem_pool_destroy() */
763
764
765extern void
766emlxs_mem_pool_clean(emlxs_hba_t *hba, MEMSEG *seg)
767{
768 emlxs_port_t *port = &PPORT;
769 uint32_t clean_count;
770 uint32_t free_count;
771 uint32_t free_pad;
772
773 mutex_enter(&EMLXS_MEMGET_LOCK);
774 mutex_enter(&EMLXS_MEMPUT_LOCK);
775
776 if (!(seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
777 mutex_exit(&EMLXS_MEMPUT_LOCK);
778 mutex_exit(&EMLXS_MEMGET_LOCK);
779 return;
780 }
781
782 if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
783 goto done;
784 }
785
786#ifdef EMLXS_POOL_DEBUG
787 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
788 "%s clean: n=%d s=%d f=%x l=%d,%d,%d "
789 "f=%d:%d",
790 seg->fc_label, seg->fc_numblks,
791 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
792 seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
793 seg->fc_low);
794#endif /* EMLXS_POOL_DEBUG */
795
796 /* Calculate current free count */
797 free_count = (seg->fc_memget_cnt + seg->fc_memput_cnt);
798
799 /* Reset fc_low value to current free count */
800 clean_count = seg->fc_low;
801 seg->fc_low = free_count;
802
803 /* Return if pool is already at lo water mark */
804 if (seg->fc_numblks <= seg->fc_lo_water) {
805 goto done;
806 }
807
808 /* Return if there is nothing to clean */
809 if ((free_count == 0) ||
810 (clean_count <= 1)) {
811 goto done;
812 }
813
814 /* Calculate a 3 percent free pad count (1 being minimum) */
815 if (seg->fc_numblks > 66) {
816 free_pad = ((seg->fc_numblks * 3)/100);
817 } else {
818 free_pad = 1;
819 }
820
821 /* Return if fc_low is below pool free pad */
822 if (clean_count <= free_pad) {
823 goto done;
824 }
825
826 clean_count -= free_pad;
827
828 /* clean_count can't exceed minimum pool levels */
829 if (clean_count > (seg->fc_numblks - seg->fc_lo_water)) {
830 clean_count = (seg->fc_numblks - seg->fc_lo_water);
831 }
832
833 emlxs_mem_pool_free(hba, seg, clean_count);
834
835done:
836 if (seg->fc_last != seg->fc_numblks) {
837 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
838 "%s update: n=%d->%d s=%d f=%x l=%d,%d,%d "
839 "f=%d:%d",
840 seg->fc_label, seg->fc_last, seg->fc_numblks,
841 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
842 seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
843 seg->fc_low);
844
845 seg->fc_last = seg->fc_numblks;
846 }
847
848 mutex_exit(&EMLXS_MEMPUT_LOCK);
849 mutex_exit(&EMLXS_MEMGET_LOCK);
850 return;
851
852} /* emlxs_mem_pool_clean() */
853
854
855extern void *
856emlxs_mem_pool_get(emlxs_hba_t *hba, MEMSEG *seg)
857{
858 emlxs_port_t *port = &PPORT;
859 void *bp = NULL;
860 MATCHMAP *mp;
861 uint32_t free_count;
862
863 mutex_enter(&EMLXS_MEMGET_LOCK);
864
865 /* Check if memory pool is GET enabled */
866 if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
867 mutex_exit(&EMLXS_MEMGET_LOCK);
868 return (NULL);
869 }
870
871 /* If no entries on memget list, then check memput list */
872 if (!seg->fc_memget_ptr) {
873 mutex_enter(&EMLXS_MEMPUT_LOCK);
874 if (seg->fc_memput_ptr) {
875 /*
876 * Move list from memput to memget
877 */
878 seg->fc_memget_ptr = seg->fc_memput_ptr;
879 seg->fc_memget_end = seg->fc_memput_end;
880 seg->fc_memget_cnt = seg->fc_memput_cnt;
881 seg->fc_memput_ptr = NULL;
882 seg->fc_memput_end = NULL;
883 seg->fc_memput_cnt = 0;
884 }
885 mutex_exit(&EMLXS_MEMPUT_LOCK);
886 }
887
888 /* If no entries on memget list, then pool is empty */
889 /* Try to allocate more if pool is dynamic */
890 if (!seg->fc_memget_ptr &&
891 (seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
892 (void) emlxs_mem_pool_alloc(hba, seg, seg->fc_step);
893 seg->fc_low = 0;
894 }
895
896 /* If no entries on memget list, then pool is empty */
897 if (!seg->fc_memget_ptr) {
898 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_alloc_failed_msg,
899 "%s empty.", seg->fc_label);
900
901 mutex_exit(&EMLXS_MEMGET_LOCK);
902 return (NULL);
903 }
904
905 /* Remove an entry from the get list */
906 bp = seg->fc_memget_ptr;
907
908 if (seg->fc_memget_end == bp) {
909 seg->fc_memget_ptr = NULL;
910 seg->fc_memget_end = NULL;
911 seg->fc_memget_cnt = 0;
912
913 } else {
914 seg->fc_memget_ptr = *((uint8_t **)bp);
915 seg->fc_memget_cnt--;
916 }
917
918 /* Initialize buffer */
919 if (!(seg->fc_memflag & FC_MBUF_DMA)) {
920 bzero(bp, seg->fc_memsize);
921 } else {
922 mp = (MATCHMAP *)bp;
923 mp->fc_mptr = NULL;
924 mp->flag |= MAP_POOL_ALLOCATED;
925 }
926
927 /* Set fc_low if pool is dynamic */
928 if (seg->fc_memflag & FC_MEMSEG_DYNAMIC) {
929 free_count = (seg->fc_memget_cnt + seg->fc_memput_cnt);
930 if (free_count < seg->fc_low) {
931 seg->fc_low = free_count;
932 }
933 }
934
935 mutex_exit(&EMLXS_MEMGET_LOCK);
936
937 return (bp);
938
939} /* emlxs_mem_pool_get() */
940
941
942extern void
943emlxs_mem_pool_put(emlxs_hba_t *hba, MEMSEG *seg, void *bp)
944{
945 emlxs_port_t *port = &PPORT;
946 MATCHMAP *mp;
947
948 /* Free the pool object */
949 mutex_enter(&EMLXS_MEMPUT_LOCK);
950
951 /* Check if memory pool is PUT enabled */
952 if (!(seg->fc_memflag & FC_MEMSEG_PUT_ENABLED)) {
953 mutex_exit(&EMLXS_MEMPUT_LOCK);
954 return;
955 }
956
957 /* Check if buffer was just freed */
958 if ((seg->fc_memput_end == bp) || (seg->fc_memget_end == bp)) {
959 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
960 "%s: Freeing free object: bp=%p", seg->fc_label, bp);
961
962 mutex_exit(&EMLXS_MEMPUT_LOCK);
963 return;
964 }
965
966 /* Validate DMA buffer */
967 if (seg->fc_memflag & FC_MBUF_DMA) {
968 mp = (MATCHMAP *)bp;
969
970 if (!(mp->flag & MAP_POOL_ALLOCATED) ||
971 (mp->segment != seg)) {
972 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
973 "mem_pool_put: %s invalid: mp=%p " \
974 "tag=0x%x flag=%x", seg->fc_label,
975 mp, mp->tag, mp->flag);
976
977 EMLXS_STATE_CHANGE(hba, FC_ERROR);
978
979 mutex_exit(&EMLXS_MEMPUT_LOCK);
980
981 emlxs_thread_spawn(hba, emlxs_shutdown_thread,
982 NULL, NULL);
983
984 return;
985 }
986 }
987
988 /* Release buffer to the end of the memput list */
989 if (seg->fc_memput_end == NULL) {
990 seg->fc_memput_ptr = bp;
991 seg->fc_memput_cnt = 1;
992 } else {
993 *((void **)(seg->fc_memput_end)) = bp;
994 seg->fc_memput_cnt++;
995 }
996 seg->fc_memput_end = bp;
997 *((void **)(bp)) = NULL;
998
999 mutex_exit(&EMLXS_MEMPUT_LOCK);
1000
1001 /* This is for late PUT's after an initial */
1002 /* emlxs_mem_pool_destroy call */
1003 if ((seg->fc_memflag & FC_MEMSEG_PUT_ENABLED) &&
1004 !(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
1005 emlxs_mem_pool_destroy(hba, seg);
1006 }
1007
1008 return;
1009
1010} /* emlxs_mem_pool_put() */
1011
1012
1013extern MATCHMAP *
1014emlxs_mem_buf_alloc(emlxs_hba_t *hba, uint32_t size)
1015{

--- 35 unchanged lines hidden ---

1051 bzero(buf_info, sizeof (MBUF_INFO));
1052 buf_info->size = sizeof (MATCHMAP);
1053 buf_info->virt = (void *)mp;
1054 emlxs_mem_free(hba, buf_info);
1055
1056 return (0);
1057 }
1058 bp = (uint8_t *)buf_info->virt;
1059 bzero(bp, buf_info->size);
1060
1061 mp->virt = buf_info->virt;
1062 mp->phys = buf_info->phys;
1063 mp->size = buf_info->size;
1064 mp->dma_handle = buf_info->dma_handle;
1065 mp->data_handle = buf_info->data_handle;
1066 mp->tag = MEM_BUF;
1067 mp->flag |= MAP_BUF_ALLOCATED;

--- 30 unchanged lines hidden ---

1098 emlxs_mem_free(hba, buf_info);
1099
1100 return;
1101
1102} /* emlxs_mem_buf_free() */
1103
1104
1105extern void *
1106emlxs_mem_get(emlxs_hba_t *hba, uint32_t seg_id)
1107{
1108 emlxs_port_t *port = &PPORT;
1109 void *bp;
1110 MAILBOXQ *mbq;
1111 IOCBQ *iocbq;
1112 NODELIST *node;
1113 MEMSEG *seg;
1114
1115 if (seg_id >= FC_MAX_SEG) {
1116
1117 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1118 "mem_get: Invalid segment id = %d",
1119 seg_id);
1120
1121 return (NULL);
1122 }
1123 seg = &hba->memseg[seg_id];
1124
1125 /* Alloc a buffer from the pool */
1126 bp = emlxs_mem_pool_get(hba, seg);
1127
1128 if (bp) {
1129 switch (seg_id) {
1130 case MEM_MBOX:
1131 mbq = (MAILBOXQ *)bp;
1132 mbq->flag |= MBQ_POOL_ALLOCATED;
1133 break;
1134

--- 22 unchanged lines hidden ---

1157 IOCBQ *iocbq;
1158 NODELIST *node;
1159 MEMSEG *seg;
1160 MATCHMAP *mp;
1161
1162 if (seg_id >= FC_MAX_SEG) {
1163
1164 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1165 "mem_put: Invalid segment id = %d: bp=%p",
1166 seg_id, bp);
1167
1168 return;
1169 }
1170 seg = &hba->memseg[seg_id];
1171
1172 /* Verify buffer */
1173 switch (seg_id) {

--- 383 unchanged lines hidden ---
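
Taken together, emlxs_mem_pool_create(), emlxs_mem_pool_alloc(), emlxs_mem_pool_get(), emlxs_mem_pool_put(), emlxs_mem_pool_clean() and emlxs_mem_pool_destroy() above form one pool life cycle, driven by the fc_hi_water, fc_lo_water and fc_step fields of each MEMSEG. The sketch below is only an illustration of that call sequence and is not part of emlxs_mem.c: example_pool_cycle() is a hypothetical caller, its field values are assumptions patterned on the MEM_MBOX case in emlxs_mem_alloc_buffer(), and it assumes the driver's own <emlxs.h> environment.

#include <emlxs.h>

/* Hypothetical illustration only; not part of emlxs_mem.c. */
static int
example_pool_cycle(emlxs_hba_t *hba)
{
	MEMSEG *seg = &hba->memseg[MEM_MBOX];
	void *bp;

	/* Describe the pool; emlxs_mem_pool_create() pre-fills fc_lo_water objects */
	bzero(seg, sizeof (MEMSEG));
	(void) strlcpy(seg->fc_label, "MBOX Pool", sizeof (seg->fc_label));
	seg->fc_memtag = MEM_MBOX;
	seg->fc_memsize = sizeof (MAILBOXQ);
	seg->fc_hi_water = hba->max_nodes + 32;
	seg->fc_lo_water = 32;
	seg->fc_step = 1;

	if (emlxs_mem_pool_create(hba, seg) < seg->fc_lo_water) {
		/* Same low-water check emlxs_mem_alloc_buffer() applies */
		emlxs_mem_pool_destroy(hba, seg);
		return (1);
	}

	/* Objects come back zeroed for non-DMA pools; return them when done */
	bp = emlxs_mem_pool_get(hba, seg);
	if (bp != NULL) {
		emlxs_mem_pool_put(hba, seg, bp);
	}

	/* Shrink an idle dynamic pool toward fc_lo_water, then tear down */
	emlxs_mem_pool_clean(hba, seg);
	emlxs_mem_pool_destroy(hba, seg);

	return (0);
}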