/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * Copyright (c) 2002-2006 Neterion, Inc.
 */

#ifdef XGE_DEBUG_FP
#include "xgehal-fifo.h"
#endif

__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_fifo_txdl_priv_t*
__hal_fifo_txdl_priv(xge_hal_dtr_h dtrh)
{
	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t*)dtrh;
	xge_hal_fifo_txdl_priv_t *txdl_priv;

	xge_assert(txdp);
	txdl_priv = (xge_hal_fifo_txdl_priv_t *)
				(ulong_t)txdp->host_control;

	xge_assert(txdl_priv);
	xge_assert(txdl_priv->dma_object);
	xge_assert(txdl_priv->dma_addr);

	xge_assert(txdl_priv->dma_object->handle == txdl_priv->dma_handle);

	return txdl_priv;
}

__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_dtr_post_single(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
			u64 ctrl_1)
{
	xge_hal_fifo_t            *fifo    = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_hw_pair_t    *hw_pair = fifo->hw_pair;
	xge_hal_fifo_txd_t        *txdp    = (xge_hal_fifo_txd_t *)dtrh;
	xge_hal_fifo_txdl_priv_t  *txdl_priv;
	u64			  ctrl;

	txdp->control_1 |= XGE_HAL_TXD_LIST_OWN_XENA;

#ifdef XGE_DEBUG_ASSERT
	/* make sure Xena overwrites the (illegal) t_code value on completion */
	XGE_HAL_SET_TXD_T_CODE(txdp->control_1, XGE_HAL_TXD_T_CODE_UNUSED_5);
#endif

	txdl_priv = __hal_fifo_txdl_priv(dtrh);

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	/* sync the TxDL to device */
	xge_os_dma_sync(fifo->channel.pdev,
		      txdl_priv->dma_handle,
		      txdl_priv->dma_addr,
		      txdl_priv->dma_offset,
		      txdl_priv->frags << 5 /* sizeof(xge_hal_fifo_txd_t) */,
		      XGE_OS_DMA_DIR_TODEVICE);
#endif
	/* write the pointer first */
	xge_os_pio_mem_write64(fifo->channel.pdev,
			     fifo->channel.regh1,
			     txdl_priv->dma_addr,
			     &hw_pair->txdl_pointer);

	/* spec: 0x00 = 1 TxD in the list */
	ctrl = XGE_HAL_TX_FIFO_LAST_TXD_NUM(txdl_priv->frags - 1);
	ctrl |= ctrl_1;
	ctrl |= fifo->no_snoop_bits;

	if (txdp->control_1 & XGE_HAL_TXD_LSO_COF_CTRL(XGE_HAL_TXD_TCP_LSO)) {
		ctrl |= XGE_HAL_TX_FIFO_SPECIAL_FUNC;
	}

	/*
	 * according to the XENA spec:
	 *
	 * It is important to note that pointers and list control words are
	 * always written in pairs: in the first write, the host must write a
	 * pointer, and in the second write, it must write the list control
	 * word. Any other access will result in an error. Also, all 16 bytes
	 * of the pointer/control structure must be written, including any
	 * reserved bytes.
	 */
	xge_os_wmb();

	/*
	 * we want to touch work_arr in order, with the ownership bit
	 * already set to HW
	 */
	__hal_channel_dtr_post(channelh, dtrh);

	xge_os_pio_mem_write64(fifo->channel.pdev, fifo->channel.regh1,
			ctrl, &hw_pair->list_control);

	xge_debug_fifo(XGE_TRACE, "posted txdl 0x"XGE_OS_LLXFMT" ctrl 0x"XGE_OS_LLXFMT" "
		"into 0x"XGE_OS_LLXFMT"", (unsigned long long)txdl_priv->dma_addr,
		(unsigned long long)ctrl,
		(unsigned long long)(ulong_t)&hw_pair->txdl_pointer);

#ifdef XGE_HAL_FIFO_DUMP_TXD
	xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
		XGE_OS_LLXFMT" dma "XGE_OS_LLXFMT,
		txdp->control_1, txdp->control_2, txdp->buffer_pointer,
		txdp->host_control, txdl_priv->dma_addr);
#endif

	fifo->channel.stats.total_posts++;
	fifo->channel.usage_cnt++;
	if (fifo->channel.stats.usage_max < fifo->channel.usage_cnt)
		fifo->channel.stats.usage_max = fifo->channel.usage_cnt;
}

__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_txdl_free_many(xge_hal_channel_h channelh,
			  xge_hal_fifo_txd_t *txdp, int list_size, int frags)
{
	xge_hal_fifo_txdl_priv_t *current_txdl_priv;
	xge_hal_fifo_txdl_priv_t *next_txdl_priv;
	int invalid_frags = frags % list_size;
	if (invalid_frags) {
		xge_debug_fifo(XGE_ERR,
			"freeing corrupt dtrh %p, fragments %d list size %d",
			txdp, frags, list_size);
		xge_assert(invalid_frags == 0);
	}
	while (txdp) {
		xge_debug_fifo(XGE_TRACE,
			"freeing linked dtrh %p, fragments %d list size %d",
			txdp, frags, list_size);
		current_txdl_priv = __hal_fifo_txdl_priv(txdp);
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
		current_txdl_priv->allocated = 0;
#endif
		__hal_channel_dtr_free(channelh, txdp);
		next_txdl_priv = current_txdl_priv->next_txdl_priv;
		xge_assert(frags);
		frags -= list_size;
		if (next_txdl_priv) {
			current_txdl_priv->next_txdl_priv = NULL;
			txdp = next_txdl_priv->first_txdp;
		}
		else {
			xge_debug_fifo(XGE_TRACE,
			"freed linked dtrh fragments %d list size %d",
			frags, list_size);
			break;
		}
	}
	xge_assert(frags == 0);
}

__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
__hal_fifo_txdl_restore_many(xge_hal_channel_h channelh,
			  xge_hal_fifo_txd_t *txdp, int txdl_count)
{
	xge_hal_fifo_txdl_priv_t *current_txdl_priv;
	xge_hal_fifo_txdl_priv_t *next_txdl_priv;
	int i = txdl_count;

	xge_assert(((xge_hal_channel_t *)channelh)->reserve_length +
		txdl_count <= ((xge_hal_channel_t *)channelh)->reserve_initial);

	current_txdl_priv = __hal_fifo_txdl_priv(txdp);
	do {
		xge_assert(i);
#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
		current_txdl_priv->allocated = 0;
#endif
		next_txdl_priv = current_txdl_priv->next_txdl_priv;
		txdp = current_txdl_priv->first_txdp;
		current_txdl_priv->next_txdl_priv = NULL;
		__hal_channel_dtr_restore(channelh, (xge_hal_dtr_h)txdp, --i);
		xge_debug_fifo(XGE_TRACE,
			"dtrh %p restored at offset %d", txdp, i);
		current_txdl_priv = next_txdl_priv;
	} while (current_txdl_priv);
	__hal_channel_dtr_restore(channelh, NULL, txdl_count);
}
/**
 * xge_hal_fifo_dtr_private - Retrieve per-descriptor private data.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 *
 * Retrieve per-descriptor private data.
 * Note that ULD requests per-descriptor space via
 * xge_hal_channel_open().
 *
 * Returns: private ULD data associated with the descriptor.
 * Usage: See ex_xmit{} and ex_tx_compl{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void*
xge_hal_fifo_dtr_private(xge_hal_dtr_h dtrh)
{
	xge_hal_fifo_txd_t *txdp    = (xge_hal_fifo_txd_t *)dtrh;

	return ((char *)(ulong_t)txdp->host_control) +
					sizeof(xge_hal_fifo_txdl_priv_t);
}
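
/*
 * Illustrative sketch (an assumption, not part of the original HAL
 * sources): a ULD completion handler could use the per-descriptor private
 * area it requested at xge_hal_channel_open() time to recover the OS
 * packet handle saved at transmit time. ex_uld_priv_t and
 * ex_os_free_packet() are hypothetical ULD-side names.
 *
 *	typedef struct ex_uld_priv_t {
 *		void *os_packet;	// OS packet saved at xmit time
 *	} ex_uld_priv_t;
 *
 *	static void ex_on_tx_done(xge_hal_dtr_h dtrh)
 *	{
 *		ex_uld_priv_t *priv =
 *			(ex_uld_priv_t *)xge_hal_fifo_dtr_private(dtrh);
 *		ex_os_free_packet(priv->os_packet);	// hypothetical helper
 *	}
 */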

/**
 * xge_hal_fifo_dtr_buffer_cnt - Get number of buffers carried by the
 * descriptor.
 * @dtrh: Descriptor handle.
 *
 * Returns: Number of buffers stored in the given descriptor. Can be used
 * _after_ the descriptor is set up for posting (see
 * xge_hal_fifo_dtr_post()) and _before_ it is deallocated (see
 * xge_hal_fifo_dtr_free()).
 *
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO int
xge_hal_fifo_dtr_buffer_cnt(xge_hal_dtr_h dtrh)
{
	xge_hal_fifo_txdl_priv_t  *txdl_priv;

	txdl_priv = __hal_fifo_txdl_priv(dtrh);

	return txdl_priv->frags;
}
/**
 * xge_hal_fifo_dtr_reserve_many - Reserve fifo descriptors that span more
 * than a single TxDL.
 * @channelh: Channel handle.
 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
 *        with a valid handle.
 * @frags: Minimum number of fragments to be reserved.
 *
 * Reserve TxDL(s) (that is, fifo descriptors)
 * for the subsequent filling-in by the upper-layer driver (ULD)
 * and posting on the corresponding channel (@channelh)
 * via xge_hal_fifo_dtr_post().
 *
 * Returns: XGE_HAL_OK - success;
 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 * See also: xge_hal_fifo_dtr_reserve_sp(), xge_hal_fifo_dtr_free(),
 * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
 * Usage: See ex_xmit{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve_many(xge_hal_channel_h channelh,
				xge_hal_dtr_h *dtrh, const int frags)
{
	xge_hal_status_e status = XGE_HAL_OK;
	int alloc_frags = 0, dang_frags = 0;
	xge_hal_fifo_txd_t *curr_txdp = NULL;
	xge_hal_fifo_txd_t *next_txdp;
	xge_hal_fifo_txdl_priv_t *next_txdl_priv, *curr_txdl_priv = NULL;
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	int max_frags = fifo->config->max_frags;
	xge_hal_dtr_h dang_dtrh = NULL;
#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	unsigned long flags = 0;
#endif
	xge_debug_fifo(XGE_TRACE, "dtr_reserve_many called for frags %d",
		frags);
	xge_assert(frags < (fifo->txdl_per_memblock * max_frags));
#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_lock(&fifo->channel.reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_irq(&fifo->channel.reserve_lock, flags);
#endif
	while (alloc_frags < frags) {
		status = __hal_channel_dtr_alloc(channelh,
				(xge_hal_dtr_h *)(void*)&next_txdp);
		if (status != XGE_HAL_OK) {
			xge_debug_fifo(XGE_ERR,
				"failed to allocate linked fragments rc %d",
				status);
			xge_assert(status == XGE_HAL_INF_OUT_OF_DESCRIPTORS);
			if (*dtrh) {
				xge_assert(alloc_frags / max_frags);
				__hal_fifo_txdl_restore_many(channelh,
					(xge_hal_fifo_txd_t *)*dtrh,
					alloc_frags / max_frags);
			}
			if (dang_dtrh) {
				xge_assert(dang_frags / max_frags);
				__hal_fifo_txdl_restore_many(channelh,
					(xge_hal_fifo_txd_t *)dang_dtrh,
					dang_frags / max_frags);
			}
			break;
		}
		xge_debug_fifo(XGE_TRACE, "allocated linked dtrh %p"
			" for frags %d", next_txdp, frags);
		next_txdl_priv = __hal_fifo_txdl_priv(next_txdp);
		xge_assert(next_txdl_priv);
		xge_assert(next_txdl_priv->first_txdp == next_txdp);
		next_txdl_priv->dang_txdl = NULL;
		next_txdl_priv->dang_frags = 0;
		next_txdl_priv->next_txdl_priv = NULL;
#if defined(XGE_OS_MEMORY_CHECK)
		next_txdl_priv->allocated = 1;
#endif
		if (!curr_txdp || !curr_txdl_priv) {
			curr_txdp = next_txdp;
			curr_txdl_priv = next_txdl_priv;
			*dtrh = (xge_hal_dtr_h)next_txdp;
			alloc_frags = max_frags;
			continue;
		}
		if (curr_txdl_priv->memblock ==
			next_txdl_priv->memblock) {
			xge_debug_fifo(XGE_TRACE,
				"linking dtrh %p, with %p",
				*dtrh, next_txdp);
			xge_assert(next_txdp ==
				curr_txdp + max_frags);
			alloc_frags += max_frags;
			curr_txdl_priv->next_txdl_priv = next_txdl_priv;
		}
		else {
			xge_assert(*dtrh);
			xge_assert(dang_dtrh == NULL);
			dang_dtrh = *dtrh;
			dang_frags = alloc_frags;
			xge_debug_fifo(XGE_TRACE,
				"dangling dtrh %p, linked with dtrh %p",
				*dtrh, next_txdp);
			next_txdl_priv->dang_txdl = (xge_hal_fifo_txd_t *)*dtrh;
			next_txdl_priv->dang_frags = alloc_frags;
			alloc_frags = max_frags;
			*dtrh = next_txdp;
		}
		curr_txdp = next_txdp;
		curr_txdl_priv = next_txdl_priv;
	}

#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_unlock(&fifo->channel.reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_unlock_irq(&fifo->channel.reserve_lock, flags);
#endif

	if (status == XGE_HAL_OK) {
		xge_hal_fifo_txdl_priv_t *txdl_priv;
		xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh;
		xge_hal_stats_channel_info_t *statsp = &fifo->channel.stats;
		txdl_priv = __hal_fifo_txdl_priv(txdp);
		/* reset the TxDL's private */
		txdl_priv->align_dma_offset = 0;
		txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
		txdl_priv->align_used_frags = 0;
		txdl_priv->frags = 0;
		txdl_priv->bytes_sent = 0;
		txdl_priv->alloc_frags = alloc_frags;
		/* reset TxD0 */
		txdp->control_1 = txdp->control_2 = 0;

#if defined(XGE_OS_MEMORY_CHECK)
		txdl_priv->allocated = 1;
#endif
		/* update statistics */
		statsp->total_posts_dtrs_many++;
		statsp->total_posts_frags_many += txdl_priv->alloc_frags;
		if (txdl_priv->dang_frags) {
			statsp->total_posts_dang_dtrs++;
			statsp->total_posts_dang_frags += txdl_priv->dang_frags;
		}
	}

	return status;
}
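
/*
 * Illustrative sketch (an assumption, not from the original sources): an
 * LSO transmit path could fall back to xge_hal_fifo_dtr_reserve_many()
 * when a packet carries more fragments than fifo.max_frags allows in one
 * TxDL. Note the NULL initialization of dtrh: the error path above reads
 * *dtrh before the first TxDL is assigned. ex_count_frags() is a
 * hypothetical helper.
 *
 *	xge_hal_dtr_h dtrh = NULL;
 *	int nfrags = ex_count_frags(pkt);	// fragments in this packet
 *	xge_hal_status_e rc;
 *
 *	if (nfrags <= fifo->config->max_frags)
 *		rc = xge_hal_fifo_dtr_reserve(channelh, &dtrh);
 *	else
 *		rc = xge_hal_fifo_dtr_reserve_many(channelh, &dtrh, nfrags);
 *	if (rc != XGE_HAL_OK)
 *		return EX_TX_BUSY;	// retry later; no descriptors now
 */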

/**
 * xge_hal_fifo_dtr_reserve - Reserve fifo descriptor.
 * @channelh: Channel handle.
 * @dtrh: Reserved descriptor. On success HAL fills this "out" parameter
 *        with a valid handle.
 *
 * Reserve a single TxDL (that is, fifo descriptor)
 * for the subsequent filling-in by the upper-layer driver (ULD)
 * and posting on the corresponding channel (@channelh)
 * via xge_hal_fifo_dtr_post().
 *
 * Note: it is the responsibility of ULD to reserve multiple descriptors
 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
 * carries up to a configured number (fifo.max_frags) of contiguous buffers.
 *
 * Returns: XGE_HAL_OK - success;
 * XGE_HAL_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
 *
 * See also: xge_hal_fifo_dtr_reserve_sp(), xge_hal_fifo_dtr_free(),
 * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
 * Usage: See ex_xmit{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve(xge_hal_channel_h channelh, xge_hal_dtr_h *dtrh)
{
	xge_hal_status_e status;
#if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	unsigned long flags = 0;
#endif

#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
	                     flags);
#endif

	status = __hal_channel_dtr_alloc(channelh, dtrh);

#if defined(XGE_HAL_TX_MULTI_RESERVE)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->reserve_lock);
#elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->reserve_lock,
	                       flags);
#endif

	if (status == XGE_HAL_OK) {
		xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)*dtrh;
		xge_hal_fifo_txdl_priv_t *txdl_priv;

		txdl_priv = __hal_fifo_txdl_priv(txdp);

		/* reset the TxDL's private */
		txdl_priv->align_dma_offset = 0;
		txdl_priv->align_vaddr_start = txdl_priv->align_vaddr;
		txdl_priv->align_used_frags = 0;
		txdl_priv->frags = 0;
		txdl_priv->alloc_frags =
			((xge_hal_fifo_t *)channelh)->config->max_frags;
		txdl_priv->dang_txdl = NULL;
		txdl_priv->dang_frags = 0;
		txdl_priv->next_txdl_priv = NULL;
		txdl_priv->bytes_sent = 0;

		/* reset TxD0 */
		txdp->control_1 = txdp->control_2 = 0;

#if defined(XGE_OS_MEMORY_CHECK)
		txdl_priv->allocated = 1;
#endif
	}

	return status;
}
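
/*
 * Illustrative sketch (assumption): minimal reserve-with-backpressure
 * pattern. XGE_HAL_INF_OUT_OF_DESCRIPTORS is informational, not fatal; a
 * ULD would typically stop its transmit queue and retry after the next
 * completion frees descriptors. ex_stop_tx_queue() is a hypothetical
 * helper.
 *
 *	xge_hal_dtr_h dtrh;
 *
 *	if (xge_hal_fifo_dtr_reserve(channelh, &dtrh) != XGE_HAL_OK) {
 *		ex_stop_tx_queue(channelh);	// resume on completion
 *		return EX_TX_BUSY;
 *	}
 */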

/**
 * xge_hal_fifo_dtr_reserve_sp - Reserve fifo descriptor and store it in
 * the ULD-provided "scratch" memory.
 * @channelh: Channel handle.
 * @dtr_sp_size: Size of the %dtr_sp "scratch pad" that HAL can use for TxDL.
 * @dtr_sp: "Scratch pad" supplied by upper-layer driver (ULD).
 *
 * Reserve TxDL and fill-in ULD supplied "scratch pad". The difference
 * between this API and xge_hal_fifo_dtr_reserve() is (possibly) -
 * performance.
 *
 * If the upper layer uses ULP-defined commands, and if those commands have
 * enough space for HAL/Xframe descriptors - then it is better (read: faster)
 * to fit all the per-command information into one command, which is typically
 * one contiguous block.
 *
 * Note: Unlike xge_hal_fifo_dtr_reserve(), this function can be used to
 * allocate a single descriptor for transmit operation.
 *
 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_free(),
 * xge_hal_ring_dtr_reserve(), xge_hal_status_e{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_reserve_sp(xge_hal_channel_h channelh, int dtr_sp_size,
			xge_hal_dtr_h dtr_sp)
{
	/* FIXME: implement */
	return XGE_HAL_OK;
}

/**
 * xge_hal_fifo_dtr_post - Post descriptor on the fifo channel.
 * @channelh: Channel handle.
 * @dtrh: Descriptor obtained via xge_hal_fifo_dtr_reserve() or
 * xge_hal_fifo_dtr_reserve_sp().
 *
 * Post descriptor on the 'fifo' type channel for transmission.
 * Prior to posting, the descriptor should be filled in accordance with the
 * Host/Xframe interface specification for a given service (LL, etc.).
 *
 * See also: xge_hal_fifo_dtr_post_many(), xge_hal_ring_dtr_post().
 * Usage: See ex_xmit{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_post(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh)
{
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_txdl_priv_t *txdl_priv;
	xge_hal_fifo_txd_t *txdp_last;
	xge_hal_fifo_txd_t *txdp_first;
#if defined(XGE_HAL_TX_MULTI_POST_IRQ)
	unsigned long flags = 0;
#endif

	txdl_priv = __hal_fifo_txdl_priv(dtrh);

	txdp_first = (xge_hal_fifo_txd_t *)dtrh;
	txdp_first->control_1 |= XGE_HAL_TXD_GATHER_CODE_FIRST;
	txdp_first->control_2 |= fifo->interrupt_type;

	txdp_last = (xge_hal_fifo_txd_t *)dtrh + (txdl_priv->frags - 1);
	txdp_last->control_1 |= XGE_HAL_TXD_GATHER_CODE_LAST;

#if defined(XGE_HAL_TX_MULTI_POST)
	xge_os_spin_lock(fifo->post_lock_ptr);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	xge_os_spin_lock_irq(fifo->post_lock_ptr, flags);
#endif

	__hal_fifo_dtr_post_single(channelh, dtrh,
		 (u64)(XGE_HAL_TX_FIFO_FIRST_LIST | XGE_HAL_TX_FIFO_LAST_LIST));

#if defined(XGE_HAL_TX_MULTI_POST)
	xge_os_spin_unlock(fifo->post_lock_ptr);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	xge_os_spin_unlock_irq(fifo->post_lock_ptr, flags);
#endif
}
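
/*
 * Illustrative ex_xmit{}-style sketch (an assumption, not the shipped
 * example): reserve a TxDL, fill one TxD per DMA-mapped fragment, enable
 * checksum offload, then post. ex_frag_t, pkt->frags, and pkt->nfrags are
 * hypothetical ULD-side names.
 *
 *	xge_hal_dtr_h dtrh;
 *	int i;
 *
 *	if (xge_hal_fifo_dtr_reserve(channelh, &dtrh) != XGE_HAL_OK)
 *		return EX_TX_BUSY;
 *
 *	for (i = 0; i < pkt->nfrags; i++) {
 *		ex_frag_t *f = &pkt->frags[i];	// already DMA-mapped
 *		xge_hal_fifo_dtr_buffer_set(channelh, dtrh, i,
 *				f->dma_addr, f->size);
 *	}
 *	xge_hal_fifo_dtr_cksum_set_bits(dtrh, XGE_HAL_TXD_TX_CKO_IPV4_EN |
 *			XGE_HAL_TXD_TX_CKO_TCP_EN);
 *	xge_hal_fifo_dtr_post(channelh, dtrh);
 */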

/**
 * xge_hal_fifo_dtr_post_many - Post multiple descriptors on fifo
 * channel.
 * @channelh: Channel to post descriptor.
 * @num: Number of descriptors (i.e., fifo TxDLs) in the %dtrs[].
 * @dtrs: Descriptors obtained via xge_hal_fifo_dtr_reserve().
 *
 * Post multiple descriptors on the fifo channel. The operation is atomic:
 * all descriptors are posted on the channel "back-to-back" without
 * letting other posts (possibly driven by multiple transmitting threads)
 * interleave.
 *
 * See also: xge_hal_fifo_dtr_post(), xge_hal_ring_dtr_post().
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_post_many(xge_hal_channel_h channelh, int num,
			xge_hal_dtr_h dtrs[])
{
	int i;
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_txd_t *txdp_last;
	xge_hal_fifo_txd_t *txdp_first;
	xge_hal_fifo_txdl_priv_t *txdl_priv_last;
#if defined(XGE_HAL_TX_MULTI_POST_IRQ)
	unsigned long flags = 0;
#endif

	xge_assert(num > 1);

	txdp_first = (xge_hal_fifo_txd_t *)dtrs[0];
	txdp_first->control_1 |= XGE_HAL_TXD_GATHER_CODE_FIRST;
	txdp_first->control_2 |= fifo->interrupt_type;

	txdl_priv_last = __hal_fifo_txdl_priv(dtrs[num-1]);
	txdp_last = (xge_hal_fifo_txd_t *)dtrs[num-1] +
					(txdl_priv_last->frags - 1);
	txdp_last->control_1 |= XGE_HAL_TXD_GATHER_CODE_LAST;

#if defined(XGE_HAL_TX_MULTI_POST)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
	flags);
#endif

	for (i = 0; i < num; i++) {
		xge_hal_fifo_txdl_priv_t *txdl_priv;
		u64 val64;
		xge_hal_dtr_h dtrh = dtrs[i];

		txdl_priv = __hal_fifo_txdl_priv(dtrh);
		txdl_priv = txdl_priv; /* Cheat lint */

		val64 = 0;
		if (i == 0) {
			val64 |= XGE_HAL_TX_FIFO_FIRST_LIST;
		} else if (i == num - 1) {
			val64 |= XGE_HAL_TX_FIFO_LAST_LIST;
		}

		val64 |= XGE_HAL_TX_FIFO_SPECIAL_FUNC;
		__hal_fifo_dtr_post_single(channelh, dtrh, val64);
	}

#if defined(XGE_HAL_TX_MULTI_POST)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->post_lock);
#elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->post_lock,
	flags);
#endif

	fifo->channel.stats.total_posts_many++;
}
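
/*
 * Illustrative sketch (assumption): posting two independently reserved
 * and filled TxDLs back-to-back, so that no other thread's post can land
 * between them on the same channel.
 *
 *	xge_hal_dtr_h dtrs[2];
 *
 *	// ... reserve dtrs[0] and dtrs[1], fill their buffers ...
 *	xge_hal_fifo_dtr_post_many(channelh, 2, dtrs);
 */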

/**
 * xge_hal_fifo_dtr_next_completed - Retrieve next completed descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle. Returned by HAL.
 * @t_code: Transfer code, as per Xframe User Guide,
 *          Transmit Descriptor Format.
 *          Returned by HAL.
 *
 * Retrieve the _next_ completed descriptor.
 * HAL uses the channel callback (*xge_hal_channel_callback_f) to notify the
 * upper-layer driver (ULD) of new completed descriptors. After that
 * the ULD can use xge_hal_fifo_dtr_next_completed to retrieve the rest of
 * the completions (the very first completion is passed by HAL via
 * xge_hal_channel_callback_f).
 *
 * Implementation-wise, the upper-layer driver is free to call
 * xge_hal_fifo_dtr_next_completed either immediately from inside the
 * channel callback, or in a deferred fashion and separate (from HAL)
 * context.
 *
 * Non-zero @t_code means failure to process the descriptor.
 * The failure could happen, for instance, when the link is
 * down, in which case Xframe completes the descriptor because it
 * is not able to send the data out.
 *
 * For details please refer to the Xframe User Guide.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
 * are currently available for processing.
 *
 * See also: xge_hal_channel_callback_f{},
 * xge_hal_ring_dtr_next_completed().
 * Usage: See ex_tx_compl{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_next_completed(xge_hal_channel_h channelh,
			xge_hal_dtr_h *dtrh, u8 *t_code)
{
	xge_hal_fifo_txd_t        *txdp;
	xge_hal_fifo_t            *fifo    = (xge_hal_fifo_t *)channelh;
#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	xge_hal_fifo_txdl_priv_t  *txdl_priv;
#endif

	__hal_channel_dtr_try_complete(channelh, dtrh);
	txdp = (xge_hal_fifo_txd_t *)*dtrh;
	if (txdp == NULL) {
		return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
	}

#if defined(XGE_OS_DMA_REQUIRES_SYNC) && defined(XGE_HAL_DMA_DTR_STREAMING)
	txdl_priv = __hal_fifo_txdl_priv(txdp);

	/* sync TxDL to read the ownership
	 *
	 * Note: 16 bytes covers Control_1 & Control_2 */
	xge_os_dma_sync(fifo->channel.pdev,
		      txdl_priv->dma_handle,
		      txdl_priv->dma_addr,
		      txdl_priv->dma_offset,
		      16,
		      XGE_OS_DMA_DIR_FROMDEVICE);
#endif

	/* check whether host owns it */
	if (!(txdp->control_1 & XGE_HAL_TXD_LIST_OWN_XENA)) {

		xge_assert(txdp->host_control != 0);

		__hal_channel_dtr_complete(channelh);

		*t_code = (u8)XGE_HAL_GET_TXD_T_CODE(txdp->control_1);

		/* see XGE_HAL_SET_TXD_T_CODE() above.. */
		xge_assert(*t_code != XGE_HAL_TXD_T_CODE_UNUSED_5);

		if (fifo->channel.usage_cnt > 0)
			fifo->channel.usage_cnt--;

		return XGE_HAL_OK;
	}

	/* no more completions */
	*dtrh = 0;
	return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
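
/*
 * Illustrative ex_tx_compl{}-style sketch (an assumption, not the shipped
 * example): drain all completed TxDLs from inside the channel callback,
 * checking @t_code before releasing each descriptor. ex_log_tx_error()
 * and ex_release_packet() are hypothetical helpers.
 *
 *	xge_hal_dtr_h dtrh;
 *	u8 t_code;
 *
 *	while (xge_hal_fifo_dtr_next_completed(channelh, &dtrh, &t_code) ==
 *	       XGE_HAL_OK) {
 *		if (t_code != 0)
 *			ex_log_tx_error(t_code);	// e.g., link down
 *		ex_release_packet(dtrh);	// free OS packet, unmap DMA
 *		xge_hal_fifo_dtr_free(channelh, dtrh);
 *	}
 */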

/**
 * xge_hal_fifo_dtr_free - Free descriptor.
 * @channelh: Channel handle.
 * @dtr: Descriptor handle.
 *
 * Free the reserved descriptor. This operation is "symmetrical" to
 * xge_hal_fifo_dtr_reserve or xge_hal_fifo_dtr_reserve_sp.
 * The "free-ing" completes the descriptor's lifecycle.
 *
 * After free-ing (see xge_hal_fifo_dtr_free()) the descriptor again can
 * be:
 *
 * - reserved (xge_hal_fifo_dtr_reserve);
 *
 * - posted (xge_hal_fifo_dtr_post);
 *
 * - completed (xge_hal_fifo_dtr_next_completed);
 *
 * - and recycled again (xge_hal_fifo_dtr_free).
 *
 * For alternative state transitions and more details please refer to
 * the design doc.
 *
 * See also: xge_hal_ring_dtr_free(), xge_hal_fifo_dtr_reserve().
 * Usage: See ex_tx_compl{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_free(xge_hal_channel_h channelh, xge_hal_dtr_h dtr)
{
#if defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	unsigned long flags = 0;
#endif
	xge_hal_fifo_txdl_priv_t *txdl_priv = __hal_fifo_txdl_priv(
					(xge_hal_fifo_txd_t *)dtr);
	int max_frags = ((xge_hal_fifo_t *)channelh)->config->max_frags;
#if defined(XGE_HAL_TX_MULTI_FREE)
	xge_os_spin_lock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	xge_os_spin_lock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
	flags);
#endif

	if (txdl_priv->alloc_frags > max_frags) {
		xge_hal_fifo_txd_t *dang_txdp = (xge_hal_fifo_txd_t *)
						txdl_priv->dang_txdl;
		int dang_frags = txdl_priv->dang_frags;
		int alloc_frags = txdl_priv->alloc_frags;
		txdl_priv->dang_txdl = NULL;
		txdl_priv->dang_frags = 0;
		txdl_priv->alloc_frags = 0;
		/* dtrh must have a linked list of dtrh */
		xge_assert(txdl_priv->next_txdl_priv);

		/* free any dangling dtrh first */
		if (dang_txdp) {
			xge_debug_fifo(XGE_TRACE,
				"freeing dangled dtrh %p for %d fragments",
				dang_txdp, dang_frags);
			__hal_fifo_txdl_free_many(channelh, dang_txdp,
				max_frags, dang_frags);
		}

		/* now free the reserved dtrh list */
		xge_debug_fifo(XGE_TRACE,
				"freeing dtrh %p list of %d fragments", dtr,
				alloc_frags);
		__hal_fifo_txdl_free_many(channelh,
				(xge_hal_fifo_txd_t *)dtr, max_frags,
				alloc_frags);
	}
	else
		__hal_channel_dtr_free(channelh, dtr);

	((xge_hal_channel_t *)channelh)->poll_bytes += txdl_priv->bytes_sent;

#if defined(XGE_DEBUG_ASSERT) && defined(XGE_OS_MEMORY_CHECK)
	__hal_fifo_txdl_priv(dtr)->allocated = 0;
#endif

#if defined(XGE_HAL_TX_MULTI_FREE)
	xge_os_spin_unlock(&((xge_hal_channel_t*)channelh)->free_lock);
#elif defined(XGE_HAL_TX_MULTI_FREE_IRQ)
	xge_os_spin_unlock_irq(&((xge_hal_channel_t*)channelh)->free_lock,
	flags);
#endif
}


/**
 * xge_hal_fifo_dtr_buffer_set_aligned - Align transmit buffer and fill
 * in fifo descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *            (of buffers).
 * @vaddr: Virtual address of the data buffer.
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 * @misaligned_size: Size (in bytes) of the misaligned portion of the
 * data buffer. Calculated by the caller, based on the platform/OS/other
 * specific criteria, which is outside of HAL's domain. See notes below.
 *
 * This API is part of the transmit descriptor preparation for posting
 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
 * xge_hal_fifo_dtr_mss_set() and xge_hal_fifo_dtr_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Xframe specification.
 * On PCI-X based systems aligning transmit data typically provides better
 * transmit performance. The typical alignment granularity: L2 cacheline size.
 * However, HAL does not make assumptions in terms of the alignment granularity;
 * this is specified via the additional @misaligned_size parameter described
 * above. Prior to calling xge_hal_fifo_dtr_buffer_set_aligned(),
 * ULD is supposed to check alignment of a given fragment/buffer. For this HAL
 * provides a separate xge_hal_check_alignment() API sufficient to cover
 * most (but not all) possible alignment criteria.
 * If the buffer appears to be aligned, the ULD calls
 * xge_hal_fifo_dtr_buffer_set().
 * Otherwise, ULD calls xge_hal_fifo_dtr_buffer_set_aligned().
 *
 * Note: This API is a "superset" of xge_hal_fifo_dtr_buffer_set(). In
 * addition to filling in the specified descriptor it aligns transmit data on
 * the specified boundary.
 * Note: Decision on whether to align or not to align a given contiguous
 * transmit buffer is outside of HAL's domain. To this end ULD can use any
 * programmable criteria, which can help to 1) boost transmit performance,
 * and/or 2) provide a workaround for PCI bridge bugs, if any.
 *
 * See also: xge_hal_fifo_dtr_buffer_set(),
 * xge_hal_check_alignment().
 *
 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_post(),
 * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_cksum_set_bits()
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_buffer_set_aligned(xge_hal_channel_h channelh,
			xge_hal_dtr_h dtrh, int frag_idx, void *vaddr,
			dma_addr_t dma_pointer, int size, int misaligned_size)
{
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_txdl_priv_t *txdl_priv;
	xge_hal_fifo_txd_t *txdp;
	int remaining_size;
	ptrdiff_t prev_boff;

	txdl_priv = __hal_fifo_txdl_priv(dtrh);
	txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;

	if (frag_idx != 0) {
		txdp->control_1 = txdp->control_2 = 0;
	}

	/* On some systems buffer size could be zero.
	 * It is the responsibility of ULD and *not HAL* to
	 * detect it and skip it. */
	xge_assert(size > 0);
	xge_assert(frag_idx < txdl_priv->alloc_frags);
	xge_assert(misaligned_size != 0 &&
		    misaligned_size <= fifo->config->alignment_size);

	remaining_size = size - misaligned_size;
	xge_assert(remaining_size >= 0);

	xge_os_memcpy((char*)txdl_priv->align_vaddr_start,
		      vaddr, misaligned_size);

	if (txdl_priv->align_used_frags >= fifo->config->max_aligned_frags) {
		return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;
	}

	/* setup new buffer */
	prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
	txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff;
	txdp->control_1 |= XGE_HAL_TXD_BUFFER0_SIZE(misaligned_size);
	txdl_priv->bytes_sent += misaligned_size;
	fifo->channel.stats.total_buffers++;
	txdl_priv->frags++;
	txdl_priv->align_used_frags++;
	txdl_priv->align_vaddr_start += fifo->config->alignment_size;
	txdl_priv->align_dma_offset = 0;

#if defined(XGE_OS_DMA_REQUIRES_SYNC)
	/* sync new buffer */
	xge_os_dma_sync(fifo->channel.pdev,
		      txdl_priv->align_dma_handle,
		      txdp->buffer_pointer,
		      0,
		      misaligned_size,
		      XGE_OS_DMA_DIR_TODEVICE);
#endif

	if (remaining_size) {
		xge_assert(frag_idx < txdl_priv->alloc_frags);
		txdp++;
		txdp->buffer_pointer = (u64)dma_pointer +
					misaligned_size;
		txdp->control_1 =
			XGE_HAL_TXD_BUFFER0_SIZE(remaining_size);
		txdl_priv->bytes_sent += remaining_size;
		txdp->control_2 = 0;
		fifo->channel.stats.total_buffers++;
		txdl_priv->frags++;
	}

	return XGE_HAL_OK;
}
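
/*
 * Illustrative sketch (assumption): per-fragment choice between the
 * aligned and the plain buffer-set path, as the doc above suggests. How
 * the misaligned byte count is computed (e.g., via
 * xge_hal_check_alignment()) is left to the ULD; ex_misaligned_bytes()
 * and ex_frag_t are hypothetical.
 *
 *	ex_frag_t *f = &pkt->frags[i];
 *	int misaligned = ex_misaligned_bytes(f);	// 0 if aligned
 *
 *	if (misaligned == 0)
 *		xge_hal_fifo_dtr_buffer_set(channelh, dtrh, i,
 *				f->dma_addr, f->size);
 *	else
 *		(void) xge_hal_fifo_dtr_buffer_set_aligned(channelh, dtrh, i,
 *				f->vaddr, f->dma_addr, f->size, misaligned);
 */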

/**
 * xge_hal_fifo_dtr_buffer_append - Append the contents of virtually
 * contiguous data buffer to a single physically contiguous buffer.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @vaddr: Virtual address of the data buffer.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the transmit descriptor preparation for posting
 * (via xge_hal_fifo_dtr_post()).
 * The main difference between this API and
 * xge_hal_fifo_dtr_buffer_set_aligned() is that this API appends the
 * contents of virtually contiguous data buffers received from the
 * upper layer into a single physically contiguous data buffer, and the
 * device will do a DMA from this buffer.
 *
 * See Also: xge_hal_fifo_dtr_buffer_finalize(), xge_hal_fifo_dtr_buffer_set(),
 * xge_hal_fifo_dtr_buffer_set_aligned().
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_dtr_buffer_append(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
		void *vaddr, int size)
{
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_txdl_priv_t *txdl_priv;
	ptrdiff_t used;

	xge_assert(size > 0);

	txdl_priv = __hal_fifo_txdl_priv(dtrh);

	used = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
	used += txdl_priv->align_dma_offset;
	if (used + (unsigned int)size > (unsigned int)fifo->align_size)
		return XGE_HAL_ERR_OUT_ALIGNED_FRAGS;

	xge_os_memcpy((char*)txdl_priv->align_vaddr_start +
		txdl_priv->align_dma_offset, vaddr, size);

	fifo->channel.stats.copied_frags++;

	txdl_priv->align_dma_offset += size;
	return XGE_HAL_OK;
}

/**
 * xge_hal_fifo_dtr_buffer_finalize - Prepare a descriptor that contains a
 * single physically contiguous buffer.
 *
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the TxDL list.
 *
 * This API, in conjunction with xge_hal_fifo_dtr_buffer_append(), prepares
 * a descriptor that consists of a single physically contiguous buffer
 * which in turn contains the contents of one or more virtually contiguous
 * buffers received from the upper layer.
 *
 * See Also: xge_hal_fifo_dtr_buffer_append().
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_buffer_finalize(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
		int frag_idx)
{
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_txdl_priv_t *txdl_priv;
	xge_hal_fifo_txd_t *txdp;
	ptrdiff_t prev_boff;

	xge_assert(frag_idx < fifo->config->max_frags);

	txdl_priv = __hal_fifo_txdl_priv(dtrh);
	txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;

	if (frag_idx != 0) {
		txdp->control_1 = txdp->control_2 = 0;
	}

	prev_boff = txdl_priv->align_vaddr_start - txdl_priv->align_vaddr;
	txdp->buffer_pointer = (u64)txdl_priv->align_dma_addr + prev_boff;
	txdp->control_1 |=
		XGE_HAL_TXD_BUFFER0_SIZE(txdl_priv->align_dma_offset);
	txdl_priv->bytes_sent += (unsigned int)txdl_priv->align_dma_offset;
	fifo->channel.stats.total_buffers++;
	fifo->channel.stats.copied_buffers++;
	txdl_priv->frags++;
	txdl_priv->align_used_frags++;

#if defined(XGE_OS_DMA_REQUIRES_SYNC)
	/* sync pre-mapped buffer */
	xge_os_dma_sync(fifo->channel.pdev,
		      txdl_priv->align_dma_handle,
		      txdp->buffer_pointer,
		      0,
		      txdl_priv->align_dma_offset,
		      XGE_OS_DMA_DIR_TODEVICE);
#endif

	/* increment vaddr_start for the next buffer_append() iteration */
	txdl_priv->align_vaddr_start += txdl_priv->align_dma_offset;
	txdl_priv->align_dma_offset = 0;
}
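
/*
 * Illustrative sketch (assumption): coalescing several small, virtually
 * contiguous headers (e.g., Ethernet + IP + TCP) into one physically
 * contiguous, pre-mapped buffer, then closing it out as fragment 0.
 * eth_hdr/ip_hdr and their lengths are hypothetical ULD-side names.
 *
 *	if (xge_hal_fifo_dtr_buffer_append(channelh, dtrh,
 *			eth_hdr, eth_len) != XGE_HAL_OK ||
 *	    xge_hal_fifo_dtr_buffer_append(channelh, dtrh,
 *			ip_hdr, ip_len) != XGE_HAL_OK)
 *		return EX_TX_BUSY;	// out of aligned-copy space
 *	xge_hal_fifo_dtr_buffer_finalize(channelh, dtrh, 0);
 *	// payload fragments then follow via xge_hal_fifo_dtr_buffer_set()
 */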

/**
 * xge_hal_fifo_dtr_buffer_set - Set transmit buffer pointer in the
 * descriptor.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
 *            (of buffers).
 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
 * @size: Size of the data buffer (in bytes).
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
 * xge_hal_fifo_dtr_mss_set() and xge_hal_fifo_dtr_cksum_set_bits().
 * All three APIs fill in the fields of the fifo descriptor,
 * in accordance with the Xframe specification.
 * Note: Compare with xge_hal_fifo_dtr_buffer_set_aligned().
 *
 * See also: xge_hal_fifo_dtr_buffer_set_aligned(),
 * xge_hal_check_alignment().
 *
 * See also: xge_hal_fifo_dtr_reserve(), xge_hal_fifo_dtr_post(),
 * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_cksum_set_bits(),
 * xge_hal_fifo_dtr_vlan_set().
 *
 * Usage: See ex_xmit{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_buffer_set(xge_hal_channel_h channelh, xge_hal_dtr_h dtrh,
		int frag_idx, dma_addr_t dma_pointer, int size)
{
	xge_hal_fifo_t *fifo = (xge_hal_fifo_t *)channelh;
	xge_hal_fifo_txdl_priv_t *txdl_priv;
	xge_hal_fifo_txd_t *txdp;

	txdl_priv = __hal_fifo_txdl_priv(dtrh);
	txdp = (xge_hal_fifo_txd_t *)dtrh + txdl_priv->frags;

	if (frag_idx != 0) {
		txdp->control_1 = txdp->control_2 = 0;
	}

	/* Note:
	 * it is the responsibility of the upper layers, and not HAL, to
	 * detect and skip zero-size fragments
	 */
	xge_assert(size > 0);
	xge_assert(frag_idx < txdl_priv->alloc_frags);

	txdp->buffer_pointer = (u64)dma_pointer;
	txdp->control_1 |= XGE_HAL_TXD_BUFFER0_SIZE(size);
	txdl_priv->bytes_sent += size;
	fifo->channel.stats.total_buffers++;
	txdl_priv->frags++;
}

/**
 * xge_hal_fifo_dtr_mss_set - Set MSS.
 * @dtrh: Descriptor handle.
 * @mss: MSS size for _this_ TCP connection. Passed by TCP stack down to the
 *       ULD, which in turn inserts the MSS into the @dtrh.
 *
 * This API is part of the preparation of the transmit descriptor for posting
 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
 * xge_hal_fifo_dtr_buffer_set(), xge_hal_fifo_dtr_buffer_set_aligned(),
 * and xge_hal_fifo_dtr_cksum_set_bits().
 * All these APIs fill in the fields of the fifo descriptor,
 * in accordance with the Xframe specification.
 *
 * See also: xge_hal_fifo_dtr_reserve(),
 * xge_hal_fifo_dtr_post(), xge_hal_fifo_dtr_vlan_set().
 * Usage: See ex_xmit{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_mss_set(xge_hal_dtr_h dtrh, int mss)
{
	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;

	txdp->control_1 |= XGE_HAL_TXD_LSO_COF_CTRL(XGE_HAL_TXD_TCP_LSO);
	txdp->control_1 |= XGE_HAL_TXD_TCP_LSO_MSS(mss);
}

/**
 * xge_hal_fifo_dtr_cksum_set_bits - Offload checksum.
 * @dtrh: Descriptor handle.
 * @cksum_bits: Specifies which checksums are to be offloaded: IPv4,
 *              and/or TCP and/or UDP.
 *
 * Ask Xframe to calculate IPv4 & transport checksums for _this_ transmit
 * descriptor.
 * This API is part of the preparation of the transmit descriptor for posting
 * (via xge_hal_fifo_dtr_post()). The related "preparation" APIs include
 * xge_hal_fifo_dtr_mss_set(), xge_hal_fifo_dtr_buffer_set_aligned(),
 * and xge_hal_fifo_dtr_buffer_set().
 * All these APIs fill in the fields of the fifo descriptor,
 * in accordance with the Xframe specification.
 *
 * See also: xge_hal_fifo_dtr_reserve(),
 * xge_hal_fifo_dtr_post(), XGE_HAL_TXD_TX_CKO_IPV4_EN,
 * XGE_HAL_TXD_TX_CKO_TCP_EN.
 * Usage: See ex_xmit{}.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_cksum_set_bits(xge_hal_dtr_h dtrh, u64 cksum_bits)
{
	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;

	txdp->control_2 |= cksum_bits;
}


/**
 * xge_hal_fifo_dtr_vlan_set - Set VLAN tag.
 * @dtrh: Descriptor handle.
 * @vlan_tag: 16-bit VLAN tag.
 *
 * Insert VLAN tag into specified transmit descriptor.
 * The actual insertion of the tag into outgoing frame is done by the hardware.
 * See also: xge_hal_fifo_dtr_buffer_set(), xge_hal_fifo_dtr_mss_set().
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO void
xge_hal_fifo_dtr_vlan_set(xge_hal_dtr_h dtrh, u16 vlan_tag)
{
	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;

	txdp->control_2 |= XGE_HAL_TXD_VLAN_ENABLE;
	txdp->control_2 |= XGE_HAL_TXD_VLAN_TAG(vlan_tag);
}
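
/*
 * Illustrative sketch (assumption): typical offload preparation on a
 * reserved-but-not-yet-posted TxDL. The order of the setters does not
 * matter; each only ORs bits into control_1/control_2 of TxD0.
 *
 *	xge_hal_fifo_dtr_cksum_set_bits(dtrh, XGE_HAL_TXD_TX_CKO_IPV4_EN |
 *			XGE_HAL_TXD_TX_CKO_TCP_EN);
 *	xge_hal_fifo_dtr_mss_set(dtrh, 1460);	// LSO segment size
 *	xge_hal_fifo_dtr_vlan_set(dtrh, 0x123);	// 802.1Q tag
 */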

/**
 * xge_hal_fifo_is_next_dtr_completed - Check if the next dtr is completed.
 * @channelh: Channel handle.
 */
__HAL_STATIC_FIFO __HAL_INLINE_FIFO xge_hal_status_e
xge_hal_fifo_is_next_dtr_completed(xge_hal_channel_h channelh)
{
	xge_hal_fifo_txd_t *txdp;
	xge_hal_dtr_h dtrh;

	__hal_channel_dtr_try_complete(channelh, &dtrh);
	txdp = (xge_hal_fifo_txd_t *)dtrh;
	if (txdp == NULL) {
		return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
	}

	/* check whether host owns it */
	if (!(txdp->control_1 & XGE_HAL_TXD_LIST_OWN_XENA)) {
		xge_assert(txdp->host_control != 0);
		return XGE_HAL_OK;
	}

	/* no more completions */
	return XGE_HAL_INF_NO_MORE_COMPLETED_DESCRIPTORS;
}
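
/*
 * Illustrative sketch (assumption): non-destructive completion poll,
 * e.g., from a watchdog or deferred-cleanup context. Unlike
 * xge_hal_fifo_dtr_next_completed(), this peek does not consume the
 * descriptor. ex_schedule_tx_cleanup() is a hypothetical helper.
 *
 *	if (xge_hal_fifo_is_next_dtr_completed(channelh) == XGE_HAL_OK)
 *		ex_schedule_tx_cleanup(channelh);
 */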