/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015 by Delphix. All rights reserved.
 */

#include <sys/types.h>
#include <sys/stream.h>
#include <sys/strsun.h>
#include <sys/strsubr.h>
#include <sys/debug.h>
#include <sys/sdt.h>
#include <sys/cmn_err.h>
#include <sys/tihdr.h>

#include <inet/common.h>
#include <inet/optcom.h>
#include <inet/ip.h>
#include <inet/ip_if.h>
#include <inet/ip_impl.h>
#include <inet/tcp.h>
#include <inet/tcp_impl.h>
#include <inet/ipsec_impl.h>
#include <inet/ipclassifier.h>
#include <inet/ipp_common.h>

/*
 * This file implements TCP fusion - a protocol-less data path for TCP
 * loopback connections. The fusion of two local TCP endpoints occurs
 * at connection establishment time. Various conditions (see details
 * in tcp_fuse()) need to be met for fusion to be successful. If it
 * fails, we fall back to the regular TCP data path; if it succeeds,
 * both endpoints proceed to use tcp_fuse_output() as the transmit path.
 * tcp_fuse_output() enqueues application data directly onto the peer's
 * receive queue; no protocol processing is involved.
 *
 * Synchronization is handled by the squeue and the mutex tcp_non_sq_lock.
 * One of the requirements for fusion to succeed is that both endpoints
 * need to be using the same squeue. This ensures that neither side
 * can disappear while the other side is still sending data. Flow
 * control information is manipulated outside the squeue, so
 * tcp_non_sq_lock must be held when touching tcp_flow_stopped.
 */
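
/*
 * Conceptually, once fused, data written on either endpoint takes the
 * short path
 *
 *	tcp_fuse_output() -> peer's tcp_rcv_list (or the peer's su_recv()
 *	upcall for non-STREAMS sockets)
 *
 * rather than the usual transmit path down through IP and back up the
 * loopback interface.
 */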

/*
 * Setting this to false disables fusion altogether; loopback
 * connections then go through the regular protocol paths.
 */
boolean_t do_tcp_fusion = B_TRUE;
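
/*
 * Being a plain global, the tunable above can in principle be flipped
 * on a live system from the kernel debugger; a hypothetical example
 * (not part of the original source):
 *
 *	echo 'do_tcp_fusion/W 0' | mdb -kw
 */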

/*
 * This routine gets called by the eager tcp upon changing state from
 * SYN_RCVD to ESTABLISHED. It fuses a direct path between itself
 * and the active connect tcp so that regular tcp processing may be
 * bypassed under allowable circumstances. Because the fusion requires
 * both endpoints to be in the same squeue, it does not work for
 * simultaneous active connects, as there is no easy way to switch from
 * one squeue to another once the connection is created. This is
 * different from the eager tcp case, where we assign it the same squeue
 * as the one given to the active connect tcp during open.
 */
void
tcp_fuse(tcp_t *tcp, uchar_t *iphdr, tcpha_t *tcpha)
{
	conn_t *peer_connp, *connp = tcp->tcp_connp;
	tcp_t *peer_tcp;
	tcp_stack_t *tcps = tcp->tcp_tcps;
	ip_stack_t *ipst = tcps->tcps_netstack->netstack_ip;

	ASSERT(!tcp->tcp_fused);
	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_loopback_peer == NULL);
	/*
	 * We need to inherit conn_rcvbuf of the listener tcp,
	 * but we can't really use tcp_listener since we get here after
	 * sending up T_CONN_IND and tcp_tli_accept() may be called
	 * independently, at which point tcp_listener is cleared;
	 * this is why we use tcp_saved_listener. The listener itself
	 * is guaranteed to be around until tcp_accept_finish() is called
	 * on this eager -- this won't happen until we're done since we're
	 * inside the eager's perimeter now.
	 */
	ASSERT(tcp->tcp_saved_listener != NULL);
	/*
	 * Lookup peer endpoint; search for the remote endpoint having
	 * the reversed address-port quadruplet in ESTABLISHED state,
	 * which is guaranteed to be unique in the system. Zone check
	 * is applied accordingly for loopback address, but not for
	 * local address since we want fusion to happen across Zones.
	 */
	if (connp->conn_ipversion == IPV4_VERSION) {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv4(connp,
		    (ipha_t *)iphdr, tcpha, ipst);
	} else {
		peer_connp = ipcl_conn_tcp_lookup_reversed_ipv6(connp,
		    (ip6_t *)iphdr, tcpha, ipst);
	}

	/*
	 * We can only proceed if the peer exists, resides in the same
	 * squeue as our conn and is not a raw socket. We also restrict
	 * fusion to endpoints of the same type (STREAMS or non-STREAMS).
	 * The squeue assignment of this eager tcp was done earlier at the
	 * time of SYN processing in ip_fanout_tcp{_v6}. Note that sharing
	 * a squeue by itself doesn't guarantee a safe condition to fuse;
	 * hence we perform additional tests below.
	 */
	ASSERT(peer_connp == NULL || peer_connp != connp);
	if (peer_connp == NULL || peer_connp->conn_sqp != connp->conn_sqp ||
	    !IPCL_IS_TCP(peer_connp) ||
	    IPCL_IS_NONSTR(connp) != IPCL_IS_NONSTR(peer_connp)) {
		if (peer_connp != NULL) {
			TCP_STAT(tcps, tcp_fusion_unqualified);
			CONN_DEC_REF(peer_connp);
		}
		return;
	}
	peer_tcp = peer_connp->conn_tcp;	/* active connect tcp */

	ASSERT(peer_tcp != NULL && peer_tcp != tcp && !peer_tcp->tcp_fused);
	ASSERT(peer_tcp->tcp_loopback_peer == NULL);
	ASSERT(peer_connp->conn_sqp == connp->conn_sqp);

	/*
	 * Due to IRE changes the peer and we might not agree on
	 * tcp_loopback. We bail out in that case.
	 */
	if (!peer_tcp->tcp_loopback) {
		TCP_STAT(tcps, tcp_fusion_unqualified);
		CONN_DEC_REF(peer_connp);
		return;
	}
	/*
	 * Fuse the endpoints; we perform further checks against both
	 * tcp endpoints to ensure that a fusion is allowed to happen.
	 */

	if (!tcp->tcp_unfusable && !peer_tcp->tcp_unfusable &&
	    tcp->tcp_xmit_head == NULL && peer_tcp->tcp_xmit_head == NULL) {
		mblk_t *mp = NULL;
		queue_t *peer_rq = peer_connp->conn_rq;

		ASSERT(!TCP_IS_DETACHED(peer_tcp));
		ASSERT(tcp->tcp_fused_sigurg_mp == NULL);
		ASSERT(peer_tcp->tcp_fused_sigurg_mp == NULL);

		/*
		 * We need to drain data on both endpoints during unfuse.
		 * If we need to send up SIGURG at the time of draining,
		 * we want to be sure that an mblk is readily available.
		 * This is why we pre-allocate the M_PCSIG mblks for both
		 * endpoints which will only be used during/after unfuse.
		 * The mblk might already exist if we are doing a re-fuse.
		 */
		if (!IPCL_IS_NONSTR(tcp->tcp_connp)) {
			ASSERT(!IPCL_IS_NONSTR(peer_tcp->tcp_connp));

			if (tcp->tcp_fused_sigurg_mp == NULL) {
				if ((mp = allocb(1, BPRI_HI)) == NULL)
					goto failed;
				tcp->tcp_fused_sigurg_mp = mp;
			}

			if (peer_tcp->tcp_fused_sigurg_mp == NULL) {
				if ((mp = allocb(1, BPRI_HI)) == NULL)
					goto failed;
				peer_tcp->tcp_fused_sigurg_mp = mp;
			}

			if ((mp = allocb(sizeof (struct stroptions),
			    BPRI_HI)) == NULL)
				goto failed;
		}

		/* Fuse both endpoints */
		peer_tcp->tcp_loopback_peer = tcp;
		tcp->tcp_loopback_peer = peer_tcp;
		peer_tcp->tcp_fused = tcp->tcp_fused = B_TRUE;

		/*
		 * We never use regular tcp paths in fusion and should
		 * therefore clear tcp_unsent on both endpoints. Having
		 * them set to non-zero values means asking for trouble
		 * especially after unfuse, where we may end up sending
		 * through regular tcp paths which expect xmit_list and
		 * friends to be correctly setup.
		 */
		peer_tcp->tcp_unsent = tcp->tcp_unsent = 0;
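
		/*
		 * A fused pair exchanges no TCP segments, so there is
		 * nothing for the retransmit, delayed-ACK or other TCP
		 * timers to do; stop them on both endpoints.
		 */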
		tcp_timers_stop(tcp);
		tcp_timers_stop(peer_tcp);

		/*
		 * Set the receive buffer and max packet size for the
		 * active open tcp; the eager's values will be set in
		 * tcp_accept_finish().
		 */
		(void) tcp_rwnd_set(peer_tcp, peer_tcp->tcp_connp->conn_rcvbuf);

		/*
		 * Set the write offset value to zero since we won't
		 * be needing any room for TCP/IP headers.
		 */
		if (!IPCL_IS_NONSTR(peer_tcp->tcp_connp)) {
			struct stroptions *stropt;

			DB_TYPE(mp) = M_SETOPTS;
			mp->b_wptr += sizeof (*stropt);

			stropt = (struct stroptions *)mp->b_rptr;
			stropt->so_flags = SO_WROFF | SO_MAXBLK;
			stropt->so_wroff = 0;
			stropt->so_maxblk = INFPSZ;

			/* Send the options up */
			putnext(peer_rq, mp);
		} else {
			struct sock_proto_props sopp;

			/* The peer is a non-STREAMS end point */
			ASSERT(IPCL_IS_TCP(peer_connp));

			sopp.sopp_flags = SOCKOPT_WROFF | SOCKOPT_MAXBLK;
			sopp.sopp_wroff = 0;
			sopp.sopp_maxblk = INFPSZ;
			(*peer_connp->conn_upcalls->su_set_proto_props)
			    (peer_connp->conn_upper_handle, &sopp);
		}
	} else {
		TCP_STAT(tcps, tcp_fusion_unqualified);
	}
	CONN_DEC_REF(peer_connp);
	return;

failed:
	if (tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(tcp->tcp_fused_sigurg_mp);
		tcp->tcp_fused_sigurg_mp = NULL;
	}
	if (peer_tcp->tcp_fused_sigurg_mp != NULL) {
		freeb(peer_tcp->tcp_fused_sigurg_mp);
		peer_tcp->tcp_fused_sigurg_mp = NULL;
	}
	CONN_DEC_REF(peer_connp);
}

/*
 * Unfuse a previously-fused pair of tcp loopback endpoints.
 */
void
tcp_unfuse(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused && peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_fused && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(tcp->tcp_connp->conn_sqp == peer_tcp->tcp_connp->conn_sqp);
	ASSERT(tcp->tcp_unsent == 0 && peer_tcp->tcp_unsent == 0);

	/*
	 * Cancel any pending push timers.
	 */
	if (tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
		tcp->tcp_push_tid = 0;
	}
	if (peer_tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(peer_tcp, peer_tcp->tcp_push_tid);
		peer_tcp->tcp_push_tid = 0;
	}

	/*
	 * Drain any pending data; note that for a detached tcp the
	 * draining will happen later, after the tcp is unfused. Non-
	 * urgent data can be handled by the regular tcp_rcv_drain().
	 * If we have urgent data sitting in the receive list, we will
	 * need to send up a SIGURG signal first before draining the data.
	 * All of this is handled by the code in tcp_fuse_rcv_drain()
	 * when it is called from tcp_rcv_drain().
	 */
	if (!TCP_IS_DETACHED(tcp)) {
		(void) tcp_fuse_rcv_drain(tcp->tcp_connp->conn_rq, tcp,
		    &tcp->tcp_fused_sigurg_mp);
	}
	if (!TCP_IS_DETACHED(peer_tcp)) {
		(void) tcp_fuse_rcv_drain(peer_tcp->tcp_connp->conn_rq,
		    peer_tcp, &peer_tcp->tcp_fused_sigurg_mp);
	}

	/* Lift up any flow-control conditions */
	mutex_enter(&tcp->tcp_non_sq_lock);
	if (tcp->tcp_flow_stopped) {
		tcp_clrqfull(tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}
	mutex_exit(&tcp->tcp_non_sq_lock);

	mutex_enter(&peer_tcp->tcp_non_sq_lock);
	if (peer_tcp->tcp_flow_stopped) {
		tcp_clrqfull(peer_tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}
	mutex_exit(&peer_tcp->tcp_non_sq_lock);

	/*
	 * Update tha_seq and tha_ack in the header template so that the
	 * regular (unfused) data path resumes with the correct sequence
	 * and acknowledgment numbers.
	 */
	tcp->tcp_tcpha->tha_seq = htonl(tcp->tcp_snxt);
	tcp->tcp_tcpha->tha_ack = htonl(tcp->tcp_rnxt);
	peer_tcp->tcp_tcpha->tha_seq = htonl(peer_tcp->tcp_snxt);
	peer_tcp->tcp_tcpha->tha_ack = htonl(peer_tcp->tcp_rnxt);

	/* Unfuse the endpoints */
	peer_tcp->tcp_fused = tcp->tcp_fused = B_FALSE;
	peer_tcp->tcp_loopback_peer = tcp->tcp_loopback_peer = NULL;
}

/*
 * Fusion output routine used to handle urgent data sent by STREAMS based
 * endpoints. This routine is called by tcp_fuse_output() for handling
 * non-M_DATA mblks.
 */
void
tcp_fuse_output_urg(tcp_t *tcp, mblk_t *mp)
{
	mblk_t *mp1;
	struct T_exdata_ind *tei;
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	mblk_t *head, *prev_head = NULL;
	tcp_stack_t *tcps = tcp->tcp_tcps;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));
	ASSERT(DB_TYPE(mp) == M_PROTO || DB_TYPE(mp) == M_PCPROTO);
	ASSERT(mp->b_cont != NULL && DB_TYPE(mp->b_cont) == M_DATA);
	ASSERT(MBLKL(mp) >= sizeof (*tei) && MBLKL(mp->b_cont) > 0);

	/*
	 * Urgent data arrives in the form of T_EXDATA_REQ from above.
	 * Each occurrence denotes a new urgent pointer. For each new
	 * urgent pointer we signal (SIGURG) the receiving app to indicate
	 * that it needs to go into urgent mode. This is similar to the
	 * urgent data handling in the regular tcp. We don't need to keep
	 * track of where the urgent pointer is, because each T_EXDATA_REQ
	 * "advances" the urgent pointer for us.
	 *
	 * The actual urgent data carried by T_EXDATA_REQ is then prepended
	 * by a T_EXDATA_IND before being enqueued behind any existing data
	 * destined for the receiving app. There is only a single urgent
	 * pointer (out-of-band mark) for a given tcp. If new urgent
	 * data arrives before the receiving app reads some existing urgent
	 * data, the previous marker is lost. This behavior is emulated
	 * accordingly below, by removing any existing T_EXDATA_IND messages
	 * and essentially converting old urgent data into non-urgent.
	 */
	ASSERT(tcp->tcp_valid_bits & TCP_URG_VALID);
	/* Let sender get out of urgent mode */
	tcp->tcp_valid_bits &= ~TCP_URG_VALID;

	/*
	 * This flag indicates that a signal needs to be sent up.
	 * This flag will only get cleared once SIGURG is delivered and
	 * is not affected by the tcp_fused flag -- delivery will still
	 * happen even after an endpoint is unfused, to handle the case
	 * where the sending endpoint immediately closes/unfuses after
	 * sending urgent data and the accept is not yet finished.
	 */
	peer_tcp->tcp_fused_sigurg = B_TRUE;

	/* Reuse T_EXDATA_REQ mblk for T_EXDATA_IND */
	DB_TYPE(mp) = M_PROTO;
	tei = (struct T_exdata_ind *)mp->b_rptr;
	tei->PRIM_type = T_EXDATA_IND;
	tei->MORE_flag = 0;
	mp->b_wptr = (uchar_t *)&tei[1];

	TCP_STAT(tcps, tcp_fusion_urg);
	TCPS_BUMP_MIB(tcps, tcpOutUrg);

	head = peer_tcp->tcp_rcv_list;
	while (head != NULL) {
		/*
		 * Remove existing T_EXDATA_IND, keep the data which follows
		 * it and relink our list. Note that we don't modify the
		 * tcp_rcv_last_tail since it never points to T_EXDATA_IND.
		 */
		if (DB_TYPE(head) != M_DATA) {
			mp1 = head;

			ASSERT(DB_TYPE(mp1->b_cont) == M_DATA);
			head = mp1->b_cont;
			mp1->b_cont = NULL;
			head->b_next = mp1->b_next;
			mp1->b_next = NULL;
			if (prev_head != NULL)
				prev_head->b_next = head;
			if (peer_tcp->tcp_rcv_list == mp1)
				peer_tcp->tcp_rcv_list = head;
			if (peer_tcp->tcp_rcv_last_head == mp1)
				peer_tcp->tcp_rcv_last_head = head;
			freeb(mp1);
		}
		prev_head = head;
		head = head->b_next;
	}
}

/*
 * Fusion output routine, called by tcp_output() and tcp_wput_proto().
 * If we are modifying any member that can be changed outside the squeue,
 * like tcp_flow_stopped, we need to take tcp_non_sq_lock.
 */
boolean_t
tcp_fuse_output(tcp_t *tcp, mblk_t *mp, uint32_t send_size)
{
	conn_t *connp = tcp->tcp_connp;
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	conn_t *peer_connp = peer_tcp->tcp_connp;
	boolean_t flow_stopped, peer_data_queued = B_FALSE;
	boolean_t urgent = (DB_TYPE(mp) != M_DATA);
	boolean_t push = B_TRUE;
	mblk_t *mp1 = mp;
	uint_t ip_hdr_len;
	uint32_t recv_size = send_size;
	tcp_stack_t *tcps = tcp->tcp_tcps;
	netstack_t *ns = tcps->tcps_netstack;
	ip_stack_t *ipst = ns->netstack_ip;
	ipsec_stack_t *ipss = ns->netstack_ipsec;
	iaflags_t ixaflags = connp->conn_ixa->ixa_flags;
	boolean_t do_ipsec, hooks_out, hooks_in, ipobs_enabled;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(connp->conn_sqp == peer_connp->conn_sqp);
	ASSERT(DB_TYPE(mp) == M_DATA || DB_TYPE(mp) == M_PROTO ||
	    DB_TYPE(mp) == M_PCPROTO);
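
	/* A zero-length message carries no data; consume it and succeed. */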
	if (send_size == 0) {
		freemsg(mp);
		return (B_TRUE);
	}

	/*
	 * Handle urgent data; we either send up SIGURG to the peer now
	 * or do it later when we drain, in case the peer is detached
	 * or if we're short of memory for the M_PCSIG mblk.
	 */
	if (urgent) {
		tcp_fuse_output_urg(tcp, mp);

		mp1 = mp->b_cont;
	}

	/*
	 * Check that we are still using an IRE_LOCAL or IRE_LOOPBACK
	 * before proceeding any further.
	 */
	if (!ip_output_verify_local(connp->conn_ixa))
		goto unfuse;

	/*
	 * Build the IP and TCP header in case we have something that
	 * needs the headers. Those cases are:
	 * 1. IPsec
	 * 2. IPobs
	 * 3. FW_HOOKS
	 *
	 * If tcp_xmit_mp() fails to dupb() the message, unfuse the
	 * connection and fall back to the regular path.
	 */
	if (ixaflags & IXAF_IS_IPV4) {
		do_ipsec = (ixaflags & IXAF_IPSEC_SECURE) ||
		    CONN_INBOUND_POLICY_PRESENT(peer_connp, ipss);

		hooks_out = HOOKS4_INTERESTED_LOOPBACK_OUT(ipst);
		hooks_in = HOOKS4_INTERESTED_LOOPBACK_IN(ipst);
		ipobs_enabled = (ipst->ips_ip4_observe.he_interested != 0);
	} else {
		do_ipsec = (ixaflags & IXAF_IPSEC_SECURE) ||
		    CONN_INBOUND_POLICY_PRESENT_V6(peer_connp, ipss);

		hooks_out = HOOKS6_INTERESTED_LOOPBACK_OUT(ipst);
		hooks_in = HOOKS6_INTERESTED_LOOPBACK_IN(ipst);
		ipobs_enabled = (ipst->ips_ip6_observe.he_interested != 0);
	}

	/* We do logical 'or' for efficiency */
	if (ipobs_enabled | do_ipsec | hooks_in | hooks_out) {
		if ((mp1 = tcp_xmit_mp(tcp, mp1, tcp->tcp_mss, NULL, NULL,
		    tcp->tcp_snxt, B_TRUE, NULL, B_FALSE)) == NULL)
			/* If tcp_xmit_mp fails, use regular path */
			goto unfuse;

		/*
		 * Leave all IP-related processing to
		 * ip_output_process_local(), which handles IPsec, IPobs,
		 * and FW_HOOKS.
		 */
		mp1 = ip_output_process_local(mp1, connp->conn_ixa, hooks_out,
		    hooks_in, do_ipsec ? peer_connp : NULL);

		/* Bail if the message was dropped for any reason. */
		if (mp1 == NULL)
			goto unfuse;

		/*
		 * The data length might have been changed by FW_HOOKS.
		 * We assume that the first mblk contains the TCP/IP headers.
		 */
		if (hooks_in || hooks_out) {
			tcpha_t *tcpha;

			ip_hdr_len = (ixaflags & IXAF_IS_IPV4) ?
			    IPH_HDR_LENGTH((ipha_t *)mp1->b_rptr) :
			    ip_hdr_length_v6(mp1, (ip6_t *)mp1->b_rptr);

			tcpha = (tcpha_t *)&mp1->b_rptr[ip_hdr_len];
			ASSERT((uchar_t *)tcpha + sizeof (tcpha_t) <=
			    mp1->b_wptr);
			recv_size += htonl(tcpha->tha_seq) - tcp->tcp_snxt;
		}

		/*
		 * Free the duplicate message created by tcp_xmit_mp();
		 * the original message passed in remains unchanged.
		 */
		freemsg(mp1);
	}

	/*
	 * Enqueue data into the peer's receive list; we may or may not
	 * drain the contents depending on the conditions below.
	 *
	 * For non-STREAMS sockets we normally queue data directly in the
	 * socket by calling the su_recv upcall. However, if the peer is
	 * detached we use tcp_rcv_enqueue() instead. Queued data will be
	 * drained when the accept completes (in tcp_accept_finish()).
	 */
	if (IPCL_IS_NONSTR(peer_connp) &&
	    !TCP_IS_DETACHED(peer_tcp)) {
		int error;
		int flags = 0;

		/*
		 * If we are at the urgent mark, notify the peer socket
		 * of the pending out-of-band data before delivering it.
		 */
		if ((tcp->tcp_valid_bits & TCP_URG_VALID) &&
		    (tcp->tcp_urg == tcp->tcp_snxt)) {
			flags = MSG_OOB;
			(*peer_connp->conn_upcalls->su_signal_oob)
			    (peer_connp->conn_upper_handle, 0);
			tcp->tcp_valid_bits &= ~TCP_URG_VALID;
		}
		if ((*peer_connp->conn_upcalls->su_recv)(
		    peer_connp->conn_upper_handle, mp, recv_size,
		    flags, &error, &push) < 0) {
			ASSERT(error != EOPNOTSUPP);
			peer_data_queued = B_TRUE;
		}
	} else {
		if (IPCL_IS_NONSTR(peer_connp) &&
		    (tcp->tcp_valid_bits & TCP_URG_VALID) &&
		    (tcp->tcp_urg == tcp->tcp_snxt)) {
			/*
			 * We cannot deal with urgent pointers
			 * that arrive before the connection has been
			 * accept()ed.
			 */
			tcp->tcp_valid_bits &= ~TCP_URG_VALID;
			freemsg(mp);
			return (B_TRUE);
		}

		tcp_rcv_enqueue(peer_tcp, mp, recv_size,
		    tcp->tcp_connp->conn_cred);

		/* In case it wrapped around and also to keep it constant */
		peer_tcp->tcp_rwnd += recv_size;
	}

	/*
	 * Exercise flow-control when needed; we will get back-enabled
	 * in either tcp_accept_finish(), tcp_unfuse(), or when data is
	 * consumed. If the peer endpoint is detached, we emulate streams
	 * flow control by checking the peer's queue size and high water
	 * mark; otherwise we simply use canputnext() to decide if we need
	 * to stop our flow.
	 *
	 * Since we are accessing our tcp_flow_stopped and might modify it,
	 * we need to take tcp->tcp_non_sq_lock.
	 */
	mutex_enter(&tcp->tcp_non_sq_lock);
	flow_stopped = tcp->tcp_flow_stopped;
	if ((TCP_IS_DETACHED(peer_tcp) &&
	    (peer_tcp->tcp_rcv_cnt >= peer_connp->conn_rcvbuf)) ||
	    (!TCP_IS_DETACHED(peer_tcp) &&
	    !IPCL_IS_NONSTR(peer_connp) && !canputnext(peer_connp->conn_rq))) {
		peer_data_queued = B_TRUE;
	}

	if (!flow_stopped && (peer_data_queued ||
	    (TCP_UNSENT_BYTES(tcp) >= connp->conn_sndbuf))) {
		tcp_setqfull(tcp);
		flow_stopped = B_TRUE;
		TCP_STAT(tcps, tcp_fusion_flowctl);
		DTRACE_PROBE3(tcp__fuse__output__flowctl, tcp_t *, tcp,
		    uint_t, send_size, uint_t, peer_tcp->tcp_rcv_cnt);
	} else if (flow_stopped && !peer_data_queued &&
	    (TCP_UNSENT_BYTES(tcp) <= connp->conn_sndlowat)) {
		tcp_clrqfull(tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
		flow_stopped = B_FALSE;
	}
	mutex_exit(&tcp->tcp_non_sq_lock);
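
	/* Although IP was bypassed, account for this as loopback traffic. */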
	ipst->ips_loopback_packets++;
	tcp->tcp_last_sent_len = send_size;

	/* Need to adjust the following SNMP MIB-related variables */
	tcp->tcp_snxt += send_size;
	tcp->tcp_suna = tcp->tcp_snxt;
	peer_tcp->tcp_rnxt += recv_size;
	peer_tcp->tcp_last_recv_len = recv_size;
	peer_tcp->tcp_rack = peer_tcp->tcp_rnxt;

	TCPS_BUMP_MIB(tcps, tcpOutDataSegs);
	TCPS_BUMP_MIB(tcps, tcpHCOutSegs);
	TCPS_UPDATE_MIB(tcps, tcpOutDataBytes, send_size);
	tcp->tcp_cs.tcp_out_data_bytes += send_size;
	tcp->tcp_cs.tcp_out_data_segs++;

	TCPS_BUMP_MIB(tcps, tcpHCInSegs);
	TCPS_BUMP_MIB(tcps, tcpInDataInorderSegs);
	TCPS_UPDATE_MIB(tcps, tcpInDataInorderBytes, send_size);
	peer_tcp->tcp_cs.tcp_in_data_inorder_bytes += send_size;
	peer_tcp->tcp_cs.tcp_in_data_inorder_segs++;

	DTRACE_TCP5(send, void, NULL, ip_xmit_attr_t *, connp->conn_ixa,
	    __dtrace_tcp_void_ip_t *, NULL, tcp_t *, tcp,
	    __dtrace_tcp_tcph_t *, NULL);
	DTRACE_TCP5(receive, void, NULL, ip_xmit_attr_t *,
	    peer_connp->conn_ixa, __dtrace_tcp_void_ip_t *, NULL,
	    tcp_t *, peer_tcp, __dtrace_tcp_tcph_t *, NULL);

	if (!IPCL_IS_NONSTR(peer_tcp->tcp_connp) &&
	    !TCP_IS_DETACHED(peer_tcp)) {
		/*
		 * Drain the peer's receive queue if it has urgent data or
		 * if we're not flow-controlled.
		 */
		if (urgent || !flow_stopped) {
			ASSERT(peer_tcp->tcp_rcv_list != NULL);
			/*
			 * For TLI-based streams, a thread in tcp_accept_swap()
			 * can race with us. That thread will ensure that the
			 * correct peer_connp->conn_rq is globally visible
			 * before peer_tcp->tcp_detached is visible as clear,
			 * but we must also ensure that the load of conn_rq
			 * cannot be reordered to be before the tcp_detached
			 * check.
			 */
			membar_consumer();
			(void) tcp_fuse_rcv_drain(peer_connp->conn_rq,
			    peer_tcp, NULL);
		}
	}
	return (B_TRUE);
unfuse:
	tcp_unfuse(tcp);
	return (B_FALSE);
}

/*
 * This routine gets called to deliver data upstream on a fused or
 * previously fused tcp loopback endpoint; the latter happens only
 * when there is a pending SIGURG signal plus urgent data that couldn't
 * be sent upstream in the past.
 */
boolean_t
tcp_fuse_rcv_drain(queue_t *q, tcp_t *tcp, mblk_t **sigurg_mpp)
{
	mblk_t *mp;
	conn_t *connp = tcp->tcp_connp;

#ifdef DEBUG
	uint_t cnt = 0;
#endif
	tcp_stack_t *tcps = tcp->tcp_tcps;
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_loopback);
	ASSERT(tcp->tcp_fused || tcp->tcp_fused_sigurg);
	ASSERT(!tcp->tcp_fused || tcp->tcp_loopback_peer != NULL);
	ASSERT(IPCL_IS_NONSTR(connp) || sigurg_mpp != NULL || tcp->tcp_fused);

	/* No need for the push timer now, in case it was scheduled */
	if (tcp->tcp_push_tid != 0) {
		(void) TCP_TIMER_CANCEL(tcp, tcp->tcp_push_tid);
		tcp->tcp_push_tid = 0;
	}
	/*
	 * If there's urgent data sitting in the receive list and we didn't
	 * get a chance to send up a SIGURG signal, make sure we send
	 * it first before draining in order to ensure that SIOCATMARK
	 * works properly.
	 */
	if (tcp->tcp_fused_sigurg) {
		ASSERT(!IPCL_IS_NONSTR(tcp->tcp_connp));

		tcp->tcp_fused_sigurg = B_FALSE;
		/*
		 * sigurg_mpp is normally NULL, i.e. when we're still
		 * fused and didn't get here because of tcp_unfuse().
		 * In this case try hard to allocate the M_PCSIG mblk.
		 */
		if (sigurg_mpp == NULL &&
		    (mp = allocb(1, BPRI_HI)) == NULL &&
		    (mp = allocb_tryhard(1)) == NULL) {
			/* Alloc failed; try again next time */
			tcp->tcp_push_tid = TCP_TIMER(tcp,
			    tcp_push_timer, tcps->tcps_push_timer_interval);
			return (B_TRUE);
		} else if (sigurg_mpp != NULL) {
			/*
			 * Use the supplied M_PCSIG mblk; it means we're
			 * either unfused or in the process of unfusing,
			 * and the drain must happen now.
			 */
			mp = *sigurg_mpp;
			*sigurg_mpp = NULL;
		}
		ASSERT(mp != NULL);

		/* Send up the signal */
		DB_TYPE(mp) = M_PCSIG;
		*mp->b_wptr++ = (uchar_t)SIGURG;
		putnext(q, mp);

		/*
		 * Let the regular tcp_rcv_drain() path handle
		 * draining the data if we're no longer fused.
		 */
		if (!tcp->tcp_fused)
			return (B_FALSE);
	}

	/* Drain the data */
	while ((mp = tcp->tcp_rcv_list) != NULL) {
		tcp->tcp_rcv_list = mp->b_next;
		mp->b_next = NULL;
#ifdef DEBUG
		cnt += msgdsize(mp);
#endif
		ASSERT(!IPCL_IS_NONSTR(connp));
		putnext(q, mp);
		TCP_STAT(tcps, tcp_fusion_putnext);
	}

#ifdef DEBUG
	ASSERT(cnt == tcp->tcp_rcv_cnt);
#endif
	tcp->tcp_rcv_last_head = NULL;
	tcp->tcp_rcv_last_tail = NULL;
	tcp->tcp_rcv_cnt = 0;
	tcp->tcp_rwnd = tcp->tcp_connp->conn_rcvbuf;

	mutex_enter(&peer_tcp->tcp_non_sq_lock);
	if (peer_tcp->tcp_flow_stopped && (TCP_UNSENT_BYTES(peer_tcp) <=
	    peer_tcp->tcp_connp->conn_sndlowat)) {
		tcp_clrqfull(peer_tcp);
		TCP_STAT(tcps, tcp_fusion_backenabled);
	}
	mutex_exit(&peer_tcp->tcp_non_sq_lock);

	return (B_TRUE);
}

/*
 * Calculate the size of the receive buffer for a fused tcp endpoint.
 */
size_t
tcp_fuse_set_rcv_hiwat(tcp_t *tcp, size_t rwnd)
{
	tcp_stack_t *tcps = tcp->tcp_tcps;
	uint32_t max_win;

	ASSERT(tcp->tcp_fused);

	/* Ensure that the value is within the maximum upper bound */
	if (rwnd > tcps->tcps_max_buf)
		rwnd = tcps->tcps_max_buf;
	/*
	 * Round up to system page size in case SO_RCVBUF is modified
	 * after SO_SNDBUF; the latter is also similarly rounded up.
	 */
	rwnd = P2ROUNDUP_TYPED(rwnd, PAGESIZE, size_t);
	/*
	 * Clamp to the largest window representable with this endpoint's
	 * receive window scale, preferring a whole multiple of the MSS.
	 */
	max_win = TCP_MAXWIN << tcp->tcp_rcv_ws;
	if (rwnd > max_win) {
		rwnd = max_win - (max_win % tcp->tcp_mss);
		if (rwnd < tcp->tcp_mss)
			rwnd = max_win;
	}
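
	/*
	 * For example, with a window scale of 0, any request larger than
	 * TCP_MAXWIN (65535) bytes is reduced to the largest multiple of
	 * the MSS that still fits in 65535 bytes.
	 */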

	/*
	 * Record the high water mark; this is used for flow-control
	 * purposes in tcp_fuse_output().
	 */
	tcp->tcp_connp->conn_rcvbuf = rwnd;
	tcp->tcp_rwnd = rwnd;
	return (rwnd);
}

/*
 * Calculate the maximum outstanding unread data block for a fused
 * tcp endpoint.
 */
int
tcp_fuse_maxpsz(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;
	conn_t *connp = tcp->tcp_connp;
	uint_t sndbuf = connp->conn_sndbuf;
	uint_t maxpsz = sndbuf;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL);
	ASSERT(peer_tcp->tcp_connp->conn_rcvbuf != 0);
	/*
	 * In the fused loopback case, we want the stream head to split
	 * up larger writes into smaller chunks for a more accurate flow-
	 * control accounting. Our maxpsz is half of the sender's send
	 * buffer or the receiver's receive buffer, whichever is smaller.
	 * We round up the buffer to system page size due to the lack of
	 * TCP MSS concept in Fusion.
	 */
	if (maxpsz > peer_tcp->tcp_connp->conn_rcvbuf)
		maxpsz = peer_tcp->tcp_connp->conn_rcvbuf;
	maxpsz = P2ROUNDUP_TYPED(maxpsz, PAGESIZE, uint_t) >> 1;
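
	/*
	 * For example, a 64K send buffer and a 32K peer receive buffer
	 * (with 4K pages) yield maxpsz = P2ROUNDUP(32K, PAGESIZE) / 2,
	 * i.e. 16K per write chunk.
	 */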
	return (maxpsz);
}

/*
 * Called to release flow control.
 */
void
tcp_fuse_backenable(tcp_t *tcp)
{
	tcp_t *peer_tcp = tcp->tcp_loopback_peer;

	ASSERT(tcp->tcp_fused);
	ASSERT(peer_tcp != NULL && peer_tcp->tcp_fused);
	ASSERT(peer_tcp->tcp_loopback_peer == tcp);
	ASSERT(!TCP_IS_DETACHED(tcp));
	ASSERT(tcp->tcp_connp->conn_sqp ==
	    peer_tcp->tcp_connp->conn_sqp);

	/* First deliver anything still sitting on our receive list */
	if (tcp->tcp_rcv_list != NULL)
		(void) tcp_fuse_rcv_drain(tcp->tcp_connp->conn_rq, tcp, NULL);

	/* Then back-enable the peer once it has drained below low water */
	mutex_enter(&peer_tcp->tcp_non_sq_lock);
	if (peer_tcp->tcp_flow_stopped &&
	    (TCP_UNSENT_BYTES(peer_tcp) <=
	    peer_tcp->tcp_connp->conn_sndlowat)) {
		tcp_clrqfull(peer_tcp);
	}
	mutex_exit(&peer_tcp->tcp_non_sq_lock);

	TCP_STAT(tcp->tcp_tcps, tcp_fusion_backenabled);
}