patch-2.2.4 linux/net/ipv4/tcp_ipv4.c
- Lines: 117
- Date: Sun Mar 21 07:22:00 1999
- Orig file: v2.2.3/linux/net/ipv4/tcp_ipv4.c
- Orig date: Wed Mar 10 15:29:52 1999
diff -u --recursive --new-file v2.2.3/linux/net/ipv4/tcp_ipv4.c linux/net/ipv4/tcp_ipv4.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp_ipv4.c,v 1.166 1999/02/23 08:12:41 davem Exp $
+ * Version: $Id: tcp_ipv4.c,v 1.170 1999/03/21 05:22:47 davem Exp $
*
* IPv4 specific functions
*
@@ -657,7 +657,6 @@
static int tcp_v4_sendmsg(struct sock *sk, struct msghdr *msg, int len)
{
- struct tcp_opt *tp;
int retval = -EINVAL;
/* Do sanity checking for sendmsg/sendto/send. */
@@ -679,15 +678,7 @@
if (addr->sin_addr.s_addr != sk->daddr)
goto out;
}
-
- lock_sock(sk);
- retval = tcp_do_sendmsg(sk, msg->msg_iovlen, msg->msg_iov,
- msg->msg_flags);
- /* Push out partial tail frames if needed. */
- tp = &(sk->tp_pinfo.af_tcp);
- if(tp->send_head && tcp_snd_test(sk, tp->send_head))
- tcp_write_xmit(sk);
- release_sock(sk);
+ retval = tcp_do_sendmsg(sk, msg);
out:
return retval;
@@ -735,6 +726,9 @@
{
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
+ if (atomic_read(&sk->sock_readers))
+ return;
+
/* Don't interested in TCP_LISTEN and open_requests (SYN-ACKs
* send out by Linux are always <576bytes so they should go through
* unfragmented).
@@ -748,19 +742,18 @@
* There is a small race when the user changes this flag in the
* route, but I think that's acceptable.
*/
- if (sk->ip_pmtudisc != IP_PMTUDISC_DONT && sk->dst_cache) {
- if (tp->pmtu_cookie > sk->dst_cache->pmtu &&
- !atomic_read(&sk->sock_readers)) {
- tcp_sync_mss(sk, sk->dst_cache->pmtu);
-
- /* Resend the TCP packet because it's
- * clear that the old packet has been
- * dropped. This is the new "fast" path mtu
- * discovery.
- */
- tcp_simple_retransmit(sk);
- } /* else let the usual retransmit timer handle it */
- }
+ if (sk->dst_cache &&
+ sk->ip_pmtudisc != IP_PMTUDISC_DONT &&
+ tp->pmtu_cookie > sk->dst_cache->pmtu) {
+ tcp_sync_mss(sk, sk->dst_cache->pmtu);
+
+ /* Resend the TCP packet because it's
+ * clear that the old packet has been
+ * dropped. This is the new "fast" path mtu
+ * discovery.
+ */
+ tcp_simple_retransmit(sk);
+ } /* else let the usual retransmit timer handle it */
}
/*
@@ -787,6 +780,11 @@
struct tcp_opt *tp;
int type = skb->h.icmph->type;
int code = skb->h.icmph->code;
+#if ICMP_MIN_LENGTH < 14
+ int no_flags = 0;
+#else
+#define no_flags 0
+#endif
struct sock *sk;
__u32 seq;
int err;
@@ -795,6 +793,10 @@
icmp_statistics.IcmpInErrors++;
return;
}
+#if ICMP_MIN_LENGTH < 14
+ if (len < (iph->ihl << 2) + 14)
+ no_flags = 1;
+#endif
th = (struct tcphdr*)(dp+(iph->ihl<<2));
@@ -861,7 +863,7 @@
* ACK should set the opening flag, but that is too
* complicated right now.
*/
- if (!th->syn && !th->ack)
+ if (!no_flags && !th->syn && !th->ack)
return;
req = tcp_v4_search_req(tp, iph, th, &prev);
@@ -896,7 +898,7 @@
break;
case TCP_SYN_SENT:
case TCP_SYN_RECV: /* Cannot happen */
- if (!th->syn)
+ if (!no_flags && !th->syn)
return;
tcp_statistics.TcpAttemptFails++;
sk->err = err;
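
The `no_flags` guard added in the hunks above deals with ICMP errors whose quoted payload is too short to include the TCP flag byte: the flags sit at offset 13 of the TCP header, so the check `len < (iph->ihl << 2) + 14` asks whether the flag byte is covered at all. Below is a minimal standalone sketch of that length test, outside the kernel; the struct and function names (`quoted_ip`, `tcp_flags_truncated`) are hypothetical stand-ins, not kernel identifiers.

/* Illustrative sketch only -- mirrors the "no_flags" length test, not kernel code. */
#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for the quoted IP header inside an ICMP error. */
struct quoted_ip {
	unsigned char ihl;	/* IP header length in 32-bit words */
};

/*
 * The TCP flag bits live at byte offset 13 of the TCP header, so the
 * ICMP error must quote at least (IP header length + 14) bytes before
 * th->syn and th->ack can be trusted.
 */
int tcp_flags_truncated(const struct quoted_ip *iph, size_t quoted_len)
{
	size_t ip_hdr_len = (size_t)iph->ihl << 2;	/* words -> bytes */

	return quoted_len < ip_hdr_len + 14;
}

int main(void)
{
	struct quoted_ip iph = { .ihl = 5 };		/* 20-byte IP header */

	printf("%d\n", tcp_flags_truncated(&iph, 28));	/* 1: only 8 TCP bytes quoted */
	printf("%d\n", tcp_flags_truncated(&iph, 40));	/* 0: full 20-byte TCP header quoted */
	return 0;
}

Note that an ICMP error is only required to quote the offending IP header plus 8 bytes of payload (28 bytes here), which is why the flag bits cannot be assumed to be present.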