author    YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>  2006-11-14 19:07:45 -0800
committer David S. Miller <davem@sunset.davemloft.net>  2006-12-02 21:22:39 -0800
commit    cfb6eeb4c860592edd123fdea908d23c6ad1c7dc (patch)
tree      361c073622faa540ef6602ef1b0a6e8c0a17fc60 /net/ipv4
parent    bf6bce71eae386dbc37f93af7e5ad173450d9945 (diff)
[TCP]: MD5 Signature Option (RFC2385) support.
Based on implementation by Rick Payne.

Signed-off-by: YOSHIFUJI Hideaki <yoshfuji@linux-ipv6.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
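For context, the per-socket key database this patch maintains is driven from userspace through the new TCP_MD5SIG socket option (parsed by tcp_v4_parse_md5_keys() in the diff below). A minimal userspace sketch, assuming the struct tcp_md5sig layout (tcpm_addr, tcpm_keylen, tcpm_key) and the TCP_MD5SIG constant that the full patch adds to include/linux/tcp.h:

	#include <string.h>
	#include <sys/socket.h>
	#include <netinet/in.h>
	#include <netinet/tcp.h>	/* TCP_MD5SIG, struct tcp_md5sig (once merged) */

	/* Sketch: install an RFC2385 key for one peer on a socket.
	 * Passing keylen == 0 deletes the mapping instead (see
	 * tcp_v4_parse_md5_keys() below). */
	static int set_md5_key(int fd, const struct sockaddr_in *peer,
			       const void *key, unsigned int keylen)
	{
		struct tcp_md5sig md5;

		memset(&md5, 0, sizeof(md5));
		memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
		md5.tcpm_keylen = keylen;
		memcpy(md5.tcpm_key, key, keylen);

		return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
	}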
Diffstat (limited to 'net/ipv4')
 net/ipv4/Kconfig         |  16
 net/ipv4/tcp.c           | 137
 net/ipv4/tcp_input.c     |   8
 net/ipv4/tcp_ipv4.c      | 673
 net/ipv4/tcp_minisocks.c |  64
 net/ipv4/tcp_output.c    | 111
 6 files changed, 973 insertions(+), 36 deletions(-)
diff --git a/net/ipv4/Kconfig b/net/ipv4/Kconfig
index bc298bcc344e..39e0cb763588 100644
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -618,5 +618,21 @@ config DEFAULT_TCP_CONG
default "reno" if DEFAULT_RENO
default "cubic"
+config TCP_MD5SIG
+ bool "TCP: MD5 Signature Option support (RFC2385) (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ select CRYPTO
+ select CRYPTO_MD5
+ ---help---
+ RFC2385 specifies a method of giving MD5 protection to TCP sessions.
+ Its main (only?) use is to protect BGP sessions between core routers
+ on the Internet.
+
+ If unsure, say N.
+
+config TCP_MD5SIG_DEBUG
+ bool "TCP: MD5 Signature Option debugging"
+ depends on TCP_MD5SIG
+
source "net/ipv4/ipvs/Kconfig"
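Building with the hunk above applied on a 2.6.19-era tree, a .config fragment enabling the feature would look like the following; TCP_MD5SIG_DEBUG only gates the hash-dump printks seen later in tcp_ipv4.c:

	CONFIG_EXPERIMENTAL=y
	CONFIG_TCP_MD5SIG=y
	# CONFIG_TCP_MD5SIG_DEBUG is not set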
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c05e8edaf544..dadef867a3bb 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -258,6 +258,7 @@
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/err.h>
+#include <linux/crypto.h>
#include <net/icmp.h>
#include <net/tcp.h>
@@ -1942,6 +1943,13 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
}
break;
+#ifdef CONFIG_TCP_MD5SIG
+ case TCP_MD5SIG:
+ /* Read the IP->Key mappings from userspace */
+ err = tp->af_specific->md5_parse(sk, optval, optlen);
+ break;
+#endif
+
default:
err = -ENOPROTOOPT;
break;
@@ -2231,6 +2239,135 @@ out:
}
EXPORT_SYMBOL(tcp_tso_segment);
+#ifdef CONFIG_TCP_MD5SIG
+static unsigned long tcp_md5sig_users;
+static struct tcp_md5sig_pool **tcp_md5sig_pool;
+static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
+
+static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
+{
+ int cpu;
+ for_each_possible_cpu(cpu) {
+ struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
+ if (p) {
+ if (p->md5_desc.tfm)
+ crypto_free_hash(p->md5_desc.tfm);
+ kfree(p);
+ p = NULL;
+ }
+ }
+ free_percpu(pool);
+}
+
+void tcp_free_md5sig_pool(void)
+{
+ struct tcp_md5sig_pool **pool = NULL;
+
+ spin_lock(&tcp_md5sig_pool_lock);
+ if (--tcp_md5sig_users == 0) {
+ pool = tcp_md5sig_pool;
+ tcp_md5sig_pool = NULL;
+ }
+ spin_unlock(&tcp_md5sig_pool_lock);
+ if (pool)
+ __tcp_free_md5sig_pool(pool);
+}
+
+EXPORT_SYMBOL(tcp_free_md5sig_pool);
+
+struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
+{
+ int cpu;
+ struct tcp_md5sig_pool **pool;
+
+ pool = alloc_percpu(struct tcp_md5sig_pool *);
+ if (!pool)
+ return NULL;
+
+ for_each_possible_cpu(cpu) {
+ struct tcp_md5sig_pool *p;
+ struct crypto_hash *hash;
+
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ goto out_free;
+ *per_cpu_ptr(pool, cpu) = p;
+
+ hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
+ if (!hash || IS_ERR(hash))
+ goto out_free;
+
+ p->md5_desc.tfm = hash;
+ }
+ return pool;
+out_free:
+ __tcp_free_md5sig_pool(pool);
+ return NULL;
+}
+
+struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
+{
+ struct tcp_md5sig_pool **pool;
+ int alloc = 0;
+
+retry:
+ spin_lock(&tcp_md5sig_pool_lock);
+ pool = tcp_md5sig_pool;
+ if (tcp_md5sig_users++ == 0) {
+ alloc = 1;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ } else if (!pool) {
+ tcp_md5sig_users--;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ cpu_relax();
+ goto retry;
+ } else
+ spin_unlock(&tcp_md5sig_pool_lock);
+
+ if (alloc) {
+ /* we cannot hold spinlock here because this may sleep. */
+ struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
+ spin_lock(&tcp_md5sig_pool_lock);
+ if (!p) {
+ tcp_md5sig_users--;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ return NULL;
+ }
+ pool = tcp_md5sig_pool;
+ if (pool) {
+ /* oops, it has already been assigned. */
+ spin_unlock(&tcp_md5sig_pool_lock);
+ __tcp_free_md5sig_pool(p);
+ } else {
+ tcp_md5sig_pool = pool = p;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ }
+ }
+ return pool;
+}
+
+EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
+
+struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
+{
+ struct tcp_md5sig_pool **p;
+ spin_lock(&tcp_md5sig_pool_lock);
+ p = tcp_md5sig_pool;
+ if (p)
+ tcp_md5sig_users++;
+ spin_unlock(&tcp_md5sig_pool_lock);
+ return (p ? *per_cpu_ptr(p, cpu) : NULL);
+}
+
+EXPORT_SYMBOL(__tcp_get_md5sig_pool);
+
+void __tcp_put_md5sig_pool(void)
+{
+ tcp_free_md5sig_pool();
+}
+
+EXPORT_SYMBOL(__tcp_put_md5sig_pool);
+#endif
+
extern void __skb_cb_too_small_for_tcp(int, int);
extern struct tcp_congestion_ops tcp_reno;
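The block above implements a lazily allocated, refcounted pool of per-CPU MD5 transforms: tcp_alloc_md5sig_pool()/tcp_free_md5sig_pool() take and drop one long-lived reference per configured key, while the __tcp_get/__tcp_put pair (wrapped as tcp_get_md5sig_pool()/tcp_put_md5sig_pool() in the header part of the patch, not shown in this net/ipv4 diff) brackets each per-packet hash. A hedged sketch of the intended calling convention:

	/* Sketch only: how a caller is expected to pair the pool helpers. */
	static int md5_pool_usage_example(void)
	{
		struct tcp_md5sig_pool *hp;

		/* Key lifetime: one long-lived reference per configured key. */
		if (tcp_alloc_md5sig_pool() == NULL)
			return -ENOMEM;

		/* Per-packet: borrow this CPU's transform briefly. */
		hp = tcp_get_md5sig_pool();
		if (hp) {
			/* ... crypto_hash_init/update/final on &hp->md5_desc ... */
			tcp_put_md5sig_pool();
		}

		tcp_free_md5sig_pool();	/* drop the reference when the key is deleted */
		return 0;
	}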
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4a8c96cdec7d..6ab3423674bb 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2677,6 +2677,14 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
opt_rx->sack_ok) {
TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
}
+#ifdef CONFIG_TCP_MD5SIG
+ case TCPOPT_MD5SIG:
+ /*
+ * The MD5 Hash has already been
+ * checked (see tcp_v{4,6}_do_rcv()).
+ */
+ break;
+#endif
};
ptr+=opsize-2;
length-=opsize;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 0ad0904bf56c..8c8e8112f98d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -78,6 +78,9 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/crypto.h>
+#include <linux/scatterlist.h>
+
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
@@ -89,6 +92,13 @@ static struct socket *tcp_socket;
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
+#ifdef CONFIG_TCP_MD5SIG
+static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr);
+static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+ __be32 saddr, __be32 daddr, struct tcphdr *th,
+ int protocol, int tcplen);
+#endif
+
struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
.lhash_lock = __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
.lhash_users = ATOMIC_INIT(0),
@@ -526,11 +536,19 @@ int tcp_v4_gso_send_check(struct sk_buff *skb)
* Exception: precedence violation. We do not implement it in any case.
*/
-static void tcp_v4_send_reset(struct sk_buff *skb)
+static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
{
struct tcphdr *th = skb->h.th;
- struct tcphdr rth;
+ struct {
+ struct tcphdr th;
+#ifdef CONFIG_TCP_MD5SIG
+ u32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
+#endif
+ } rep;
struct ip_reply_arg arg;
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *key;
+#endif
/* Never send a reset in response to a reset. */
if (th->rst)
@@ -540,29 +558,50 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
return;
/* Swap the send and the receive. */
- memset(&rth, 0, sizeof(struct tcphdr));
- rth.dest = th->source;
- rth.source = th->dest;
- rth.doff = sizeof(struct tcphdr) / 4;
- rth.rst = 1;
+ memset(&rep, 0, sizeof(rep));
+ rep.th.dest = th->source;
+ rep.th.source = th->dest;
+ rep.th.doff = sizeof(struct tcphdr) / 4;
+ rep.th.rst = 1;
if (th->ack) {
- rth.seq = th->ack_seq;
+ rep.th.seq = th->ack_seq;
} else {
- rth.ack = 1;
- rth.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
- skb->len - (th->doff << 2));
+ rep.th.ack = 1;
+ rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
+ skb->len - (th->doff << 2));
}
memset(&arg, 0, sizeof arg);
- arg.iov[0].iov_base = (unsigned char *)&rth;
- arg.iov[0].iov_len = sizeof rth;
+ arg.iov[0].iov_base = (unsigned char *)&rep;
+ arg.iov[0].iov_len = sizeof(rep.th);
+
+#ifdef CONFIG_TCP_MD5SIG
+ key = sk ? tcp_v4_md5_do_lookup(sk, skb->nh.iph->daddr) : NULL;
+ if (key) {
+ rep.opt[0] = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_MD5SIG << 8) |
+ TCPOLEN_MD5SIG);
+ /* Update length and the length the header thinks exists */
+ arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
+ rep.th.doff = arg.iov[0].iov_len / 4;
+
+ tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[1],
+ key,
+ skb->nh.iph->daddr,
+ skb->nh.iph->saddr,
+ &rep.th, IPPROTO_TCP,
+ arg.iov[0].iov_len);
+ }
+#endif
+
arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
skb->nh.iph->saddr, /*XXX*/
sizeof(struct tcphdr), IPPROTO_TCP, 0);
arg.csumoffset = offsetof(struct tcphdr, check) / 2;
- ip_send_reply(tcp_socket->sk, skb, &arg, sizeof rth);
+ ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
TCP_INC_STATS_BH(TCP_MIB_OUTSEGS);
TCP_INC_STATS_BH(TCP_MIB_OUTRSTS);
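For reference, the option word assembled above packs two NOP pads plus the MD5SIG kind and length into one 32-bit word; with the standard values (TCPOPT_NOP = 1, TCPOPT_MD5SIG = 19, TCPOLEN_MD5SIG = 18) it works out to:

	/* (1 << 24) | (1 << 16) | (19 << 8) | 18 == 0x01011312     */
	/* wire bytes: 01 01 13 12, followed by the 16-byte digest, */
	/* i.e. 4 + 16 = 20 bytes total == TCPOLEN_MD5SIG_ALIGNED   */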
@@ -572,15 +611,24 @@ static void tcp_v4_send_reset(struct sk_buff *skb)
outside socket context is ugly, certainly. What can I do?
*/
-static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
+static void tcp_v4_send_ack(struct tcp_timewait_sock *twsk,
+ struct sk_buff *skb, u32 seq, u32 ack,
u32 win, u32 ts)
{
struct tcphdr *th = skb->h.th;
struct {
struct tcphdr th;
- u32 tsopt[TCPOLEN_TSTAMP_ALIGNED >> 2];
+ u32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
+#ifdef CONFIG_TCP_MD5SIG
+ + (TCPOLEN_MD5SIG_ALIGNED >> 2)
+#endif
+ ];
} rep;
struct ip_reply_arg arg;
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *key;
+ struct tcp_md5sig_key tw_key;
+#endif
memset(&rep.th, 0, sizeof(struct tcphdr));
memset(&arg, 0, sizeof arg);
@@ -588,12 +636,12 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
arg.iov[0].iov_base = (unsigned char *)&rep;
arg.iov[0].iov_len = sizeof(rep.th);
if (ts) {
- rep.tsopt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
- (TCPOPT_TIMESTAMP << 8) |
- TCPOLEN_TIMESTAMP);
- rep.tsopt[1] = htonl(tcp_time_stamp);
- rep.tsopt[2] = htonl(ts);
- arg.iov[0].iov_len = sizeof(rep);
+ rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
+ (TCPOPT_TIMESTAMP << 8) |
+ TCPOLEN_TIMESTAMP);
+ rep.opt[1] = htonl(tcp_time_stamp);
+ rep.opt[2] = htonl(ts);
+ arg.iov[0].iov_len = TCPOLEN_TSTAMP_ALIGNED;
}
/* Swap the send and the receive. */
@@ -605,6 +653,44 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
rep.th.ack = 1;
rep.th.window = htons(win);
+#ifdef CONFIG_TCP_MD5SIG
+ /*
+ * The SKB holds an incoming packet, but may not have a valid ->sk
+ * pointer. This is especially the case when we're dealing with a
+ * TIME_WAIT ack, because the sk structure is long gone, and only
+ * the tcp_timewait_sock remains. So the md5 key is stashed in that
+ * structure, and we use it in preference. I believe that (twsk ||
+ * skb->sk) holds true, but we program defensively.
+ */
+ if (!twsk && skb->sk) {
+ key = tcp_v4_md5_do_lookup(skb->sk, skb->nh.iph->daddr);
+ } else if (twsk && twsk->tw_md5_keylen) {
+ tw_key.key = twsk->tw_md5_key;
+ tw_key.keylen = twsk->tw_md5_keylen;
+ key = &tw_key;
+ } else {
+ key = NULL;
+ }
+
+ if (key) {
+ int offset = (ts) ? 3 : 0;
+
+ rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_MD5SIG << 8) |
+ TCPOLEN_MD5SIG);
+ arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
+ rep.th.doff = arg.iov[0].iov_len/4;
+
+ tcp_v4_do_calc_md5_hash((__u8 *)&rep.opt[offset],
+ key,
+ skb->nh.iph->daddr,
+ skb->nh.iph->saddr,
+ &rep.th, IPPROTO_TCP,
+ arg.iov[0].iov_len);
+ }
+#endif
+
arg.csum = csum_tcpudp_nofold(skb->nh.iph->daddr,
skb->nh.iph->saddr, /*XXX*/
arg.iov[0].iov_len, IPPROTO_TCP, 0);
@@ -618,9 +704,9 @@ static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
struct inet_timewait_sock *tw = inet_twsk(sk);
- const struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
+ struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
- tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
+ tcp_v4_send_ack(tcptw, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcptw->tw_ts_recent);
inet_twsk_put(tw);
@@ -628,7 +714,8 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
static void tcp_v4_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req)
{
- tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1, tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
+ tcp_v4_send_ack(NULL, skb, tcp_rsk(req)->snt_isn + 1,
+ tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
req->ts_recent);
}
@@ -714,6 +801,461 @@ static struct ip_options *tcp_v4_save_options(struct sock *sk,
return dopt;
}
+#ifdef CONFIG_TCP_MD5SIG
+/*
+ * RFC2385 MD5 checksumming requires a mapping of
+ * IP address->MD5 Key.
+ * We need to maintain these in the sk structure.
+ */
+
+/* Find the Key structure for an address. */
+static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, __be32 addr)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int i;
+
+ if (!tp->md5sig_info || !tp->md5sig_info->entries4)
+ return NULL;
+ for (i = 0; i < tp->md5sig_info->entries4; i++) {
+ if (tp->md5sig_info->keys4[i].addr == addr)
+ return (struct tcp_md5sig_key *)&tp->md5sig_info->keys4[i];
+ }
+ return NULL;
+}
+
+struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
+ struct sock *addr_sk)
+{
+ return tcp_v4_md5_do_lookup(sk, inet_sk(addr_sk)->daddr);
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_lookup);
+
+struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
+ struct request_sock *req)
+{
+ return tcp_v4_md5_do_lookup(sk, inet_rsk(req)->rmt_addr);
+}
+
+/* This can be called on a newly created socket, from other files */
+int tcp_v4_md5_do_add(struct sock *sk, __be32 addr,
+ u8 *newkey, u8 newkeylen)
+{
+ /* Add Key to the list */
+ struct tcp4_md5sig_key *key;
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp4_md5sig_key *keys;
+
+ key = (struct tcp4_md5sig_key *) tcp_v4_md5_do_lookup(sk, addr);
+ if (key) {
+ /* Pre-existing entry - just update that one. */
+ kfree (key->key);
+ key->key = newkey;
+ key->keylen = newkeylen;
+ } else {
+ if (!tp->md5sig_info) {
+ tp->md5sig_info = kzalloc(sizeof(*tp->md5sig_info), GFP_ATOMIC);
+ if (!tp->md5sig_info) {
+ kfree(newkey);
+ return -ENOMEM;
+ }
+ }
+ if (tcp_alloc_md5sig_pool() == NULL) {
+ kfree(newkey);
+ return -ENOMEM;
+ }
+ if (tp->md5sig_info->alloced4 == tp->md5sig_info->entries4) {
+ keys = kmalloc((sizeof(struct tcp4_md5sig_key) *
+ (tp->md5sig_info->entries4 + 1)), GFP_ATOMIC);
+ if (!keys) {
+ kfree(newkey);
+ tcp_free_md5sig_pool();
+ return -ENOMEM;
+ }
+
+ if (tp->md5sig_info->entries4)
+ memcpy(keys, tp->md5sig_info->keys4,
+ (sizeof (struct tcp4_md5sig_key) *
+ tp->md5sig_info->entries4));
+
+ /* Free old key list, and reference new one */
+ if (tp->md5sig_info->keys4)
+ kfree(tp->md5sig_info->keys4);
+ tp->md5sig_info->keys4 = keys;
+ tp->md5sig_info->alloced4++;
+ }
+ tp->md5sig_info->entries4++;
+ tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].addr = addr;
+ tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].key = newkey;
+ tp->md5sig_info->keys4[tp->md5sig_info->entries4 - 1].keylen = newkeylen;
+ }
+ return 0;
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_do_add);
+
+static int tcp_v4_md5_add_func(struct sock *sk, struct sock *addr_sk,
+ u8 *newkey, u8 newkeylen)
+{
+ return tcp_v4_md5_do_add(sk, inet_sk(addr_sk)->daddr,
+ newkey, newkeylen);
+}
+
+int tcp_v4_md5_do_del(struct sock *sk, __be32 addr)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ int i;
+
+ for (i = 0; i < tp->md5sig_info->entries4; i++) {
+ if (tp->md5sig_info->keys4[i].addr == addr) {
+ /* Free the key */
+ kfree(tp->md5sig_info->keys4[i].key);
+ tp->md5sig_info->entries4--;
+
+ if (tp->md5sig_info->entries4 == 0) {
+ kfree(tp->md5sig_info->keys4);
+ tp->md5sig_info->keys4 = NULL;
+ } else {
+ /* Need to do some manipulation */
+ if (tp->md5sig_info->entries4 != i)
+ memcpy(&tp->md5sig_info->keys4[i],
+ &tp->md5sig_info->keys4[i+1],
+ (tp->md5sig_info->entries4 - i)
+ * sizeof (struct tcp4_md5sig_key));
+ }
+ tcp_free_md5sig_pool();
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+EXPORT_SYMBOL(tcp_v4_md5_do_del);
+
+static void tcp_v4_clear_md5_list (struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /* Free each key, then the key array itself. Each key
+ * holds one reference on the crypto pool (taken in
+ * tcp_v4_md5_do_add()), so drop one reference per key.
+ */
+ if (tp->md5sig_info->entries4) {
+ int i;
+ for (i = 0; i < tp->md5sig_info->entries4; i++) {
+ kfree(tp->md5sig_info->keys4[i].key);
+ tcp_free_md5sig_pool();
+ }
+ tp->md5sig_info->entries4 = 0;
+ }
+ if (tp->md5sig_info->keys4) {
+ kfree(tp->md5sig_info->keys4);
+ tp->md5sig_info->keys4 = NULL;
+ tp->md5sig_info->alloced4 = 0;
+ }
+}
+
+static int tcp_v4_parse_md5_keys (struct sock *sk, char __user *optval,
+ int optlen)
+{
+ struct tcp_md5sig cmd;
+ struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
+ u8 *newkey;
+
+ if (optlen < sizeof(cmd))
+ return -EINVAL;
+
+ if (copy_from_user (&cmd, optval, sizeof(cmd)))
+ return -EFAULT;
+
+ if (sin->sin_family != AF_INET)
+ return -EINVAL;
+
+ if (!cmd.tcpm_key || !cmd.tcpm_keylen) {
+ if (!tcp_sk(sk)->md5sig_info)
+ return -ENOENT;
+ return tcp_v4_md5_do_del(sk, sin->sin_addr.s_addr);
+ }
+
+ if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
+ return -EINVAL;
+
+ if (!tcp_sk(sk)->md5sig_info) {
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct tcp_md5sig_info *p;
+
+ p = kzalloc(sizeof(struct tcp_md5sig_info), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ tp->md5sig_info = p;
+
+ }
+
+ newkey = kmalloc(cmd.tcpm_keylen, GFP_KERNEL);
+ if (!newkey)
+ return -ENOMEM;
+ memcpy(newkey, cmd.tcpm_key, cmd.tcpm_keylen);
+ return tcp_v4_md5_do_add(sk, sin->sin_addr.s_addr,
+ newkey, cmd.tcpm_keylen);
+}
+
+static int tcp_v4_do_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+ __be32 saddr, __be32 daddr,
+ struct tcphdr *th, int protocol,
+ int tcplen)
+{
+ struct scatterlist sg[4];
+ __u16 data_len;
+ int block = 0;
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+ int i;
+#endif
+ __u16 old_checksum;
+ struct tcp_md5sig_pool *hp;
+ struct tcp4_pseudohdr *bp;
+ struct hash_desc *desc;
+ int err;
+ unsigned int nbytes = 0;
+
+ /*
+ * Okay, so RFC2385 is turned on for this connection,
+ * so we need to generate the MD5 hash for the packet now.
+ */
+
+ hp = tcp_get_md5sig_pool();
+ if (!hp)
+ goto clear_hash_noput;
+
+ bp = &hp->md5_blk.ip4;
+ desc = &hp->md5_desc;
+
+ /*
+ * 1. the TCP pseudo-header (in the order: source IP address,
+ * destination IP address, zero-padded protocol number, and
+ * segment length)
+ */
+ bp->saddr = saddr;
+ bp->daddr = daddr;
+ bp->pad = 0;
+ bp->protocol = protocol;
+ bp->len = htons(tcplen);
+ sg_set_buf(&sg[block++], bp, sizeof(*bp));
+ nbytes += sizeof(*bp);
+
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+ printk("Calcuating hash for: ");
+ for (i = 0; i < sizeof (*bp); i++)
+ printk ("%02x ", (unsigned int)((unsigned char *)bp)[i]);
+ printk(" ");
+#endif
+
+ /* 2. the TCP header, excluding options, and assuming a
+ * checksum of zero.
+ */
+ old_checksum = th->check;
+ th->check = 0;
+ sg_set_buf(&sg[block++], th, sizeof(struct tcphdr));
+ nbytes += sizeof(struct tcphdr);
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+ for (i = 0; i < sizeof (struct tcphdr); i++)
+ printk (" %02x", (unsigned int)((unsigned char *)th)[i]);
+#endif
+ /* 3. the TCP segment data (if any) */
+ data_len = tcplen - (th->doff << 2);
+ if (data_len > 0) {
+ unsigned char *data = (unsigned char *)th + (th->doff << 2);
+ sg_set_buf(&sg[block++], data, data_len);
+ nbytes += data_len;
+ }
+
+ /* 4. an independently-specified key or password, known to both
+ * TCPs and presumably connection-specific
+ */
+ sg_set_buf(&sg[block++], key->key, key->keylen);
+ nbytes += key->keylen;
+
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+ printk (" and password: ");
+ for (i = 0; i < key->keylen; i++)
+ printk ("%02x ", (unsigned int)key->key[i]);
+#endif
+
+ /* Now store the Hash into the packet */
+ err = crypto_hash_init(desc);
+ if (err)
+ goto clear_hash;
+ err = crypto_hash_update(desc, sg, nbytes);
+ if (err)
+ goto clear_hash;
+ err = crypto_hash_final(desc, md5_hash);
+ if (err)
+ goto clear_hash;
+
+ /* Reset header, and free up the crypto */
+ tcp_put_md5sig_pool();
+ th->check = old_checksum;
+
+out:
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+ printk(" result:");
+ for (i = 0; i < 16; i++)
+ printk (" %02x", (unsigned int)(((u8*)md5_hash)[i]));
+ printk("\n");
+#endif
+ return 0;
+clear_hash:
+ tcp_put_md5sig_pool();
+clear_hash_noput:
+ memset(md5_hash, 0, 16);
+ goto out;
+}
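The bp pointer above refers to the scratch pseudo-header kept in the per-CPU pool. For readability, this is its IPv4 layout as the full patch defines it in include/net/tcp.h (outside this net/ipv4 diff), matching the RFC2385 requirement that the checksum pseudo-header be hashed first:

	struct tcp4_pseudohdr {
		__be32	saddr;
		__be32	daddr;
		__u8	pad;
		__u8	protocol;
		__be16	len;
	};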
+
+int tcp_v4_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
+ struct sock *sk,
+ struct dst_entry *dst,
+ struct request_sock *req,
+ struct tcphdr *th, int protocol,
+ int tcplen)
+{
+ __be32 saddr, daddr;
+
+ if (sk) {
+ saddr = inet_sk(sk)->saddr;
+ daddr = inet_sk(sk)->daddr;
+ } else {
+ struct rtable *rt = (struct rtable *)dst;
+ BUG_ON(!rt);
+ saddr = rt->rt_src;
+ daddr = rt->rt_dst;
+ }
+ return tcp_v4_do_calc_md5_hash(md5_hash, key,
+ saddr, daddr,
+ th, protocol, tcplen);
+}
+
+EXPORT_SYMBOL(tcp_v4_calc_md5_hash);
+
+static int tcp_v4_inbound_md5_hash (struct sock *sk, struct sk_buff *skb)
+{
+ /*
+ * This gets called for each TCP segment that arrives
+ * so we want to be efficient.
+ * We have 3 drop cases:
+ * o No MD5 hash and one expected.
+ * o MD5 hash and we're not expecting one.
+ * o MD5 hash and its wrong.
+ */
+ __u8 *hash_location = NULL;
+ struct tcp_md5sig_key *hash_expected;
+ struct iphdr *iph = skb->nh.iph;
+ struct tcphdr *th = skb->h.th;
+ int length = (th->doff << 2) - sizeof (struct tcphdr);
+ int genhash;
+ unsigned char *ptr;
+ unsigned char newhash[16];
+
+ hash_expected = tcp_v4_md5_do_lookup(sk, iph->saddr);
+
+ /*
+ * If the TCP option length is less than the TCP_MD5SIG
+ * option length, then we can shortcut
+ */
+ if (length < TCPOLEN_MD5SIG) {
+ if (hash_expected)
+ return 1;
+ else
+ return 0;
+ }
+
+ /* Okay, we can't shortcut - we have to grub through the options */
+ ptr = (unsigned char *)(th + 1);
+ while (length > 0) {
+ int opcode = *ptr++;
+ int opsize;
+
+ switch (opcode) {
+ case TCPOPT_EOL:
+ goto done_opts;
+ case TCPOPT_NOP:
+ length--;
+ continue;
+ default:
+ opsize = *ptr++;
+ if (opsize < 2)
+ goto done_opts;
+ if (opsize > length)
+ goto done_opts;
+
+ if (opcode == TCPOPT_MD5SIG) {
+ hash_location = ptr;
+ goto done_opts;
+ }
+ }
+ ptr += opsize-2;
+ length -= opsize;
+ }
+done_opts:
+ /* We've parsed the options - do we have a hash? */
+ if (!hash_expected && !hash_location)
+ return 0;
+
+ if (hash_expected && !hash_location) {
+ if (net_ratelimit()) {
+ printk(KERN_INFO "MD5 Hash NOT expected but found "
+ "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
+ NIPQUAD (iph->saddr), ntohs(th->source),
+ NIPQUAD (iph->daddr), ntohs(th->dest));
+ }
+ return 1;
+ }
+
+ if (!hash_expected && hash_location) {
+ if (net_ratelimit()) {
+ printk(KERN_INFO "MD5 Hash NOT expected but found "
+ "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)\n",
+ NIPQUAD (iph->saddr), ntohs(th->source),
+ NIPQUAD (iph->daddr), ntohs(th->dest));
+ }
+ return 1;
+ }
+
+ /* Okay, so this is hash_expected and hash_location -
+ * so we need to calculate the checksum.
+ */
+ genhash = tcp_v4_do_calc_md5_hash(newhash,
+ hash_expected,
+ iph->saddr, iph->daddr,
+ th, sk->sk_protocol,
+ skb->len);
+
+ if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+ if (net_ratelimit()) {
+ printk(KERN_INFO "MD5 Hash failed for "
+ "(" NIPQUAD_FMT ", %d)->(" NIPQUAD_FMT ", %d)%s\n",
+ NIPQUAD (iph->saddr), ntohs(th->source),
+ NIPQUAD (iph->daddr), ntohs(th->dest),
+ genhash ? " tcp_v4_calc_md5_hash failed" : "");
+#ifdef CONFIG_TCP_MD5SIG_DEBUG
+ do {
+ int i;
+ printk("Received: ");
+ for (i = 0; i < 16; i++)
+ printk("%02x ", 0xff & (int)hash_location[i]);
+ printk("\n");
+ printk("Calculated: ");
+ for (i = 0; i < 16; i++)
+ printk("%02x ", 0xff & (int)newhash[i]);
+ printk("\n");
+ } while(0);
+#endif
+ }
+ return 1;
+ }
+ return 0;
+}
+
+#endif
+
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
.family = PF_INET,
.obj_size = sizeof(struct tcp_request_sock),
@@ -723,9 +1265,16 @@ struct request_sock_ops tcp_request_sock_ops __read_mostly = {
.send_reset = tcp_v4_send_reset,
};
+struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+#ifdef CONFIG_TCP_MD5SIG
+ .md5_lookup = tcp_v4_reqsk_md5_lookup,
+#endif
+};
+
static struct timewait_sock_ops tcp_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp_timewait_sock),
.twsk_unique = tcp_twsk_unique,
+ .twsk_destructor= tcp_twsk_destructor,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
@@ -773,6 +1322,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
if (!req)
goto drop;
+#ifdef CONFIG_TCP_MD5SIG
+ tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
+#endif
+
tcp_clear_options(&tmp_opt);
tmp_opt.mss_clamp = 536;
tmp_opt.user_mss = tcp_sk(sk)->rx_opt.user_mss;
@@ -891,6 +1444,9 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
struct inet_sock *newinet;
struct tcp_sock *newtp;
struct sock *newsk;
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *key;
+#endif
if (sk_acceptq_is_full(sk))
goto exit_overflow;
@@ -925,6 +1481,24 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
tcp_initialize_rcv_mss(newsk);
+#ifdef CONFIG_TCP_MD5SIG
+ /* Copy over the MD5 key from the original socket */
+ if ((key = tcp_v4_md5_do_lookup(sk, newinet->daddr)) != NULL) {
+ /*
+ * We're using one, so create a matching key
+ * on the newsk structure. If we fail to get
+ * memory, then we end up not copying the key
+ * across. Shucks.
+ */
+ char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
+ if (newkey) {
+ memcpy(newkey, key->key, key->keylen);
+ tcp_v4_md5_do_add(newsk, inet_sk(sk)->daddr,
+ newkey, key->keylen);
+ }
+ }
+#endif
+
__inet_hash(&tcp_hashinfo, newsk, 0);
__inet_inherit_port(&tcp_hashinfo, sk, newsk);
@@ -1000,10 +1574,24 @@ static int tcp_v4_checksum_init(struct sk_buff *skb)
*/
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
+ struct sock *rsk;
+#ifdef CONFIG_TCP_MD5SIG
+ /*
+ * We really want to reject the packet as early as possible
+ * if:
+ * o We're expecting an MD5'd packet and there is no MD5 TCP option
+ * o There is an MD5 option and we're not expecting one
+ */
+ if (tcp_v4_inbound_md5_hash (sk, skb))
+ goto discard;
+#endif
+
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
TCP_CHECK_TIMER(sk);
- if (tcp_rcv_established(sk, skb, skb->h.th, skb->len))
+ if (tcp_rcv_established(sk, skb, skb->h.th, skb->len)) {
+ rsk = sk;
goto reset;
+ }
TCP_CHECK_TIMER(sk);
return 0;
}
@@ -1017,20 +1605,24 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
goto discard;
if (nsk != sk) {
- if (tcp_child_process(sk, nsk, skb))
+ if (tcp_child_process(sk, nsk, skb)) {
+ rsk = nsk;
goto reset;
+ }
return 0;
}
}
TCP_CHECK_TIMER(sk);
- if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len))
+ if (tcp_rcv_state_process(sk, skb, skb->h.th, skb->len)) {
+ rsk = sk;
goto reset;
+ }
TCP_CHECK_TIMER(sk);
return 0;
reset:
- tcp_v4_send_reset(skb);
+ tcp_v4_send_reset(rsk, skb);
discard:
kfree_skb(skb);
/* Be careful here. If this function gets more complicated and
@@ -1139,7 +1731,7 @@ no_tcp_socket:
bad_packet:
TCP_INC_STATS_BH(TCP_MIB_INERRS);
} else {
- tcp_v4_send_reset(skb);
+ tcp_v4_send_reset(NULL, skb);
}
discard_it:
@@ -1262,6 +1854,15 @@ struct inet_connection_sock_af_ops ipv4_specific = {
#endif
};
+struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
+#ifdef CONFIG_TCP_MD5SIG
+ .md5_lookup = tcp_v4_md5_lookup,
+ .calc_md5_hash = tcp_v4_calc_md5_hash,
+ .md5_add = tcp_v4_md5_add_func,
+ .md5_parse = tcp_v4_parse_md5_keys,
+#endif
+};
+
/* NOTE: A lot of things set to zero explicitly by call to
* sk_alloc() so need not be done here.
*/
@@ -1301,6 +1902,9 @@ static int tcp_v4_init_sock(struct sock *sk)
icsk->icsk_af_ops = &ipv4_specific;
icsk->icsk_sync_mss = tcp_sync_mss;
+#ifdef CONFIG_TCP_MD5SIG
+ tp->af_specific = &tcp_sock_ipv4_specific;
+#endif
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
@@ -1324,6 +1928,15 @@ int tcp_v4_destroy_sock(struct sock *sk)
/* Cleans up our, hopefully empty, out_of_order_queue. */
__skb_queue_purge(&tp->out_of_order_queue);
+#ifdef CONFIG_TCP_MD5SIG
+ /* Clean up the MD5 key list, if any */
+ if (tp->md5sig_info) {
+ tcp_v4_clear_md5_list(sk);
+ kfree(tp->md5sig_info);
+ tp->md5sig_info = NULL;
+ }
+#endif
+
#ifdef CONFIG_NET_DMA
/* Cleans up our sk_async_wait_queue */
__skb_queue_purge(&sk->sk_async_wait_queue);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 0163d9826907..ac55d8892cf1 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -306,6 +306,28 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tw->tw_ipv6only = np->ipv6only;
}
#endif
+
+#ifdef CONFIG_TCP_MD5SIG
+ /*
+ * The timewait bucket does not have the key DB from the
+ * sock structure. We just make a quick copy of the
+ * md5 key being used (if indeed we are using one)
+ * so the timewait ack generating code has the key.
+ */
+ do {
+ struct tcp_md5sig_key *key;
+ memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
+ tcptw->tw_md5_keylen = 0;
+ key = tp->af_specific->md5_lookup(sk, sk);
+ if (key != NULL) {
+ memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
+ tcptw->tw_md5_keylen = key->keylen;
+ if (tcp_alloc_md5sig_pool() == NULL)
+ BUG();
+ }
+ } while(0);
+#endif
+
/* Linkage updates. */
__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
@@ -337,6 +359,17 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
tcp_done(sk);
}
+void tcp_twsk_destructor(struct sock *sk)
+{
+ struct tcp_timewait_sock *twsk = tcp_twsk(sk);
+#ifdef CONFIG_TCP_MD5SIG
+ if (twsk->tw_md5_keylen)
+ tcp_put_md5sig_pool();
+#endif
+}
+
+EXPORT_SYMBOL_GPL(tcp_twsk_destructor);
+
/* This is not only more efficient than what we used to do, it eliminates
* a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
*
@@ -435,6 +468,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->rx_opt.ts_recent_stamp = 0;
newtp->tcp_header_len = sizeof(struct tcphdr);
}
+#ifdef CONFIG_TCP_MD5SIG
+ newtp->md5sig_info = NULL; /*XXX*/
+ if (newtp->af_specific->md5_lookup(sk, newsk))
+ newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
@@ -617,6 +655,30 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
req, NULL);
if (child == NULL)
goto listen_overflow;
+#ifdef CONFIG_TCP_MD5SIG
+ else {
+ /* Copy over the MD5 key from the original socket */
+ struct tcp_md5sig_key *key;
+ struct tcp_sock *tp = tcp_sk(sk);
+ key = tp->af_specific->md5_lookup(sk, child);
+ if (key != NULL) {
+ /*
+ * We're using one, so create a matching key on the
+ * newsk structure. If we fail to get memory then we
+ * end up not copying the key across. Shucks.
+ */
+ char *newkey = kmalloc(key->keylen, GFP_ATOMIC);
+ if (newkey) {
+ if (!tcp_alloc_md5sig_pool())
+ BUG();
+ memcpy(newkey, key->key, key->keylen);
+ tp->af_specific->md5_add(child, child,
+ newkey,
+ key->keylen);
+ }
+ }
+ }
+#endif
inet_csk_reqsk_queue_unlink(sk, req, prev);
inet_csk_reqsk_queue_removed(sk, req);
@@ -633,7 +695,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
embryonic_reset:
NET_INC_STATS_BH(LINUX_MIB_EMBRYONICRSTS);
if (!(flg & TCP_FLAG_RST))
- req->rsk_ops->send_reset(skb);
+ req->rsk_ops->send_reset(sk, skb);
inet_csk_reqsk_queue_drop(sk, req, prev);
return NULL;
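The TIME_WAIT handling above copies the key by value because the full tcp_sock, and with it the md5sig_info list, is gone once the connection enters TIME_WAIT. The destination fields live in struct tcp_timewait_sock; in the full patch (include/linux/tcp.h, outside this diff) they are approximately:

	struct tcp_timewait_sock {
		/* ... existing timewait fields ... */
	#ifdef CONFIG_TCP_MD5SIG
		__u16	tw_md5_keylen;
		__u8	tw_md5_key[TCP_MD5SIG_MAXKEYLEN];
	#endif
	};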
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 6a8581ab9a23..32c1a972fa31 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -270,7 +270,7 @@ static u16 tcp_select_window(struct sock *sk)
}
static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
- __u32 tstamp)
+ __u32 tstamp, __u8 **md5_hash)
{
if (tp->rx_opt.tstamp_ok) {
*ptr++ = htonl((TCPOPT_NOP << 24) |
@@ -298,16 +298,29 @@ static void tcp_build_and_update_options(__be32 *ptr, struct tcp_sock *tp,
tp->rx_opt.eff_sacks--;
}
}
+#ifdef CONFIG_TCP_MD5SIG
+ if (md5_hash) {
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_MD5SIG << 8) |
+ TCPOLEN_MD5SIG);
+ *md5_hash = (__u8 *)ptr;
+ }
+#endif
}
/* Construct a tcp options header for a SYN or SYN_ACK packet.
* If this is every changed make sure to change the definition of
* MAX_SYN_SIZE to match the new maximum number of options that you
* can generate.
+ *
+ * Note that with the RFC2385 TCP option, we make room for the
+ * 16-byte MD5 hash. It is filled in just before the packet is
+ * sent, so a pointer to the hash location is passed back up.
*/
static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
int offer_wscale, int wscale, __u32 tstamp,
- __u32 ts_recent)
+ __u32 ts_recent, __u8 **md5_hash)
{
/* We always get an MSS option.
* The option bytes which will be seen in normal data
@@ -346,6 +359,20 @@ static void tcp_syn_build_options(__be32 *ptr, int mss, int ts, int sack,
(TCPOPT_WINDOW << 16) |
(TCPOLEN_WINDOW << 8) |
(wscale));
+#ifdef CONFIG_TCP_MD5SIG
+ /*
+ * If MD5 is enabled, then we set the option, and include the size
+ * (always 18). The actual MD5 hash is added just before the
+ * packet is sent.
+ */
+ if (md5_hash) {
+ *ptr++ = htonl((TCPOPT_NOP << 24) |
+ (TCPOPT_NOP << 16) |
+ (TCPOPT_MD5SIG << 8) |
+ TCPOLEN_MD5SIG);
+ *md5_hash = (__u8 *) ptr;
+ }
+#endif
}
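A quick accounting check on what these builders emit, and why tcp_current_mss() and tcp_connect_init() below adjust by TCPOLEN_MD5SIG_ALIGNED (standard option sizes assumed):

	/* Established flow with timestamps and MD5 enabled:
	 *   sizeof(struct tcphdr)    = 20
	 *   TCPOLEN_TSTAMP_ALIGNED   = 12  (NOP NOP kind=8 len=10 + two 4-byte stamps)
	 *   TCPOLEN_MD5SIG_ALIGNED   = 20  (NOP NOP kind=19 len=18 + 16-byte hash)
	 *   header total             = 52 bytes, so th->doff = 52 / 4 = 13
	 */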
/* This routine actually transmits TCP packets queued in by
@@ -366,6 +393,10 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
struct tcp_sock *tp;
struct tcp_skb_cb *tcb;
int tcp_header_size;
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *md5;
+ __u8 *md5_hash_location;
+#endif
struct tcphdr *th;
int sysctl_flags;
int err;
@@ -424,6 +455,16 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
if (tcp_packets_in_flight(tp) == 0)
tcp_ca_event(sk, CA_EVENT_TX_START);
+#ifdef CONFIG_TCP_MD5SIG
+ /*
+ * Are we doing MD5 on this segment? If so - make
+ * room for it.
+ */
+ md5 = tp->af_specific->md5_lookup(sk, sk);
+ if (md5)
+ tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
th = (struct tcphdr *) skb_push(skb, tcp_header_size);
skb->h.th = th;
@@ -460,13 +501,34 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
(sysctl_flags & SYSCTL_FLAG_WSCALE),
tp->rx_opt.rcv_wscale,
tcb->when,
- tp->rx_opt.ts_recent);
+ tp->rx_opt.ts_recent,
+
+#ifdef CONFIG_TCP_MD5SIG
+ md5 ? &md5_hash_location :
+#endif
+ NULL);
} else {
tcp_build_and_update_options((__be32 *)(th + 1),
- tp, tcb->when);
+ tp, tcb->when,
+#ifdef CONFIG_TCP_MD5SIG
+ md5 ? &md5_hash_location :
+#endif
+ NULL);
TCP_ECN_send(sk, tp, skb, tcp_header_size);
}
+#ifdef CONFIG_TCP_MD5SIG
+ /* Calculate the MD5 hash, as we have all we need now */
+ if (md5) {
+ tp->af_specific->calc_md5_hash(md5_hash_location,
+ md5,
+ sk, NULL, NULL,
+ skb->h.th,
+ sk->sk_protocol,
+ skb->len);
+ }
+#endif
+
icsk->icsk_af_ops->send_check(sk, skb->len, skb);
if (likely(tcb->flags & TCPCB_FLAG_ACK))
@@ -840,6 +902,11 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
(tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
+#ifdef CONFIG_TCP_MD5SIG
+ if (tp->af_specific->md5_lookup(sk, sk))
+ mss_now -= TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
xmit_size_goal = mss_now;
if (doing_tso) {
@@ -2033,6 +2100,10 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
struct tcphdr *th;
int tcp_header_size;
struct sk_buff *skb;
+#ifdef CONFIG_TCP_MD5SIG
+ struct tcp_md5sig_key *md5;
+ __u8 *md5_hash_location;
+#endif
skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
if (skb == NULL)
@@ -2048,6 +2119,13 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
(ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
/* SACK_PERM is in the place of NOP NOP of TS */
((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
+
+#ifdef CONFIG_TCP_MD5SIG
+ /* Are we doing MD5 on this segment? If so - make room for it */
+ md5 = tcp_rsk(req)->af_specific->md5_lookup(sk, req);
+ if (md5)
+ tcp_header_size += TCPOLEN_MD5SIG_ALIGNED;
+#endif
skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
memset(th, 0, sizeof(struct tcphdr));
@@ -2085,11 +2163,29 @@ struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
tcp_syn_build_options((__be32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok,
ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale,
TCP_SKB_CB(skb)->when,
- req->ts_recent);
+ req->ts_recent,
+ (
+#ifdef CONFIG_TCP_MD5SIG
+ md5 ? &md5_hash_location :
+#endif
+ NULL)
+ );
skb->csum = 0;
th->doff = (tcp_header_size >> 2);
TCP_INC_STATS(TCP_MIB_OUTSEGS);
+
+#ifdef CONFIG_TCP_MD5SIG
+ /* Okay, we have all we need - do the md5 hash if needed */
+ if (md5) {
+ tp->af_specific->calc_md5_hash(md5_hash_location,
+ md5,
+ NULL, dst, req,
+ skb->h.th, sk->sk_protocol,
+ skb->len);
+ }
+#endif
+
return skb;
}
@@ -2108,6 +2204,11 @@ static void tcp_connect_init(struct sock *sk)
tp->tcp_header_len = sizeof(struct tcphdr) +
(sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
+#ifdef CONFIG_TCP_MD5SIG
+ if (tp->af_specific->md5_lookup(sk, sk) != NULL)
+ tp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
+#endif
+
/* If user gave his TCP_MAXSEG, record it to clamp */
if (tp->rx_opt.user_mss)
tp->rx_opt.mss_clamp = tp->rx_opt.user_mss;