author:    Jerry Chu <hkchu@google.com>    2012-08-31 12:29:11 +0000
committer: David S. Miller <davem@davemloft.net>    2012-08-31 20:02:18 -0400
commit:    1046716368979dee857a2b8a91c4a8833f21b9cb
tree:      fdda70278e6fa56c27d242fb1a0ec9b7e3e81d38 /include
parent:    2a35cfa591ac63f17815c2d9432b799e37527980
tcp: TCP Fast Open Server - header & support functions
This patch adds all the necessary data structures and support
functions to implement the TFO server side. It also documents a number
of flags for the sysctl_tcp_fastopen knob, and adds a few Linux
extension MIBs.
In addition, it includes the following:
1. A new TCP_FASTOPEN socket option that an application must set on
its listener, supplying the max backlog of pending TFO requests
allowed, in order to enable TFO (a usage sketch follows the list).
2. A number of key data structures:
"fastopen_rsk" in tcp_sock - for a big socket to access its
request_sock for retransmission and ack processing purpose. It is
non-NULL iff 3WHS not completed.
"fastopenq" in request_sock_queue - points to a per Fast Open
listener data structure "fastopen_queue" to keep track of qlen (# of
outstanding Fast Open requests) and max_qlen, among other things.
"listener" in tcp_request_sock - to point to the original listener
for book-keeping purpose, i.e., to maintain qlen against max_qlen
as part of defense against IP spoofing attack.
3. Various data structures and functions, many in tcp_fastopen.c, to
support server-side Fast Open cookie operations, including
/proc/sys/net/ipv4/tcp_fastopen_key to allow manual rekeying.
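
A minimal userspace sketch of how a server is expected to opt in with
the new socket option; the port, the backlog value of 16, and the
fallback define of TCP_FASTOPEN to 23 (mirroring the value added to
include/linux/tcp.h below) are illustrative assumptions, not part of
this patch:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

#ifndef TCP_FASTOPEN
#define TCP_FASTOPEN 23		/* mirrors the new define in include/linux/tcp.h */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr;
	int qlen = 16;		/* illustrative max backlog of pending TFO requests */

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(8080);
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	/* Opt in to TFO on this listener; the value becomes max_qlen */
	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
	listen(fd, 128);
	/* accept() and read the data-in-SYN exactly like a normal connection */
	return 0;
}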
Signed-off-by: H.K. Jerry Chu <hkchu@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Tom Herbert <therbert@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
-rw-r--r--  include/linux/snmp.h         |  4
-rw-r--r--  include/linux/tcp.h          | 45
-rw-r--r--  include/net/request_sock.h   | 36
-rw-r--r--  include/net/tcp.h            | 46
4 files changed, 121 insertions, 10 deletions
diff --git a/include/linux/snmp.h b/include/linux/snmp.h
index ad6e3a6..fdfba23 100644
--- a/include/linux/snmp.h
+++ b/include/linux/snmp.h
@@ -241,6 +241,10 @@ enum
 	LINUX_MIB_TCPCHALLENGEACK,		/* TCPChallengeACK */
 	LINUX_MIB_TCPSYNCHALLENGE,		/* TCPSYNChallenge */
 	LINUX_MIB_TCPFASTOPENACTIVE,		/* TCPFastOpenActive */
+	LINUX_MIB_TCPFASTOPENPASSIVE,		/* TCPFastOpenPassive*/
+	LINUX_MIB_TCPFASTOPENPASSIVEFAIL,	/* TCPFastOpenPassiveFail */
+	LINUX_MIB_TCPFASTOPENLISTENOVERFLOW,	/* TCPFastOpenListenOverflow */
+	LINUX_MIB_TCPFASTOPENCOOKIEREQD,	/* TCPFastOpenCookieReqd */
 	__LINUX_MIB_MAX
 };
 
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index eb125a4..ae46df5 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -110,6 +110,7 @@ enum {
 #define TCP_REPAIR_QUEUE	20
 #define TCP_QUEUE_SEQ		21
 #define TCP_REPAIR_OPTIONS	22
+#define TCP_FASTOPEN		23	/* Enable FastOpen on listeners */
 
 struct tcp_repair_opt {
 	__u32	opt_code;
@@ -246,6 +247,7 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
 /* TCP Fast Open */
 #define TCP_FASTOPEN_COOKIE_MIN	4	/* Min Fast Open Cookie size in bytes */
 #define TCP_FASTOPEN_COOKIE_MAX	16	/* Max Fast Open Cookie size in bytes */
+#define TCP_FASTOPEN_COOKIE_SIZE 8	/* the size employed by this impl. */
 
 /* TCP Fast Open Cookie as stored in memory */
 struct tcp_fastopen_cookie {
@@ -312,9 +314,14 @@ struct tcp_request_sock {
 	/* Only used by TCP MD5 Signature so far. */
 	const struct tcp_request_sock_ops *af_specific;
 #endif
+	struct sock			*listener; /* needed for TFO */
 	u32				rcv_isn;
 	u32				snt_isn;
 	u32				snt_synack; /* synack sent time */
+	u32				rcv_nxt; /* the ack # by SYNACK. For
+						  * FastOpen it's the seq#
+						  * after data-in-SYN.
+						  */
 };
 
 static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
@@ -505,14 +512,18 @@ struct tcp_sock {
 	struct tcp_md5sig_info	__rcu *md5sig_info;
 #endif
 
-/* TCP fastopen related information */
-	struct tcp_fastopen_request *fastopen_req;
-
 	/* When the cookie options are generated and exchanged, then this
 	 * object holds a reference to them (cookie_values->kref).  Also
 	 * contains related tcp_cookie_transactions fields.
 	 */
 	struct tcp_cookie_values  *cookie_values;
+
+/* TCP fastopen related information */
+	struct tcp_fastopen_request *fastopen_req;
+	/* fastopen_rsk points to request_sock that resulted in this big
+	 * socket. Used to retransmit SYNACKs etc.
+	 */
+	struct request_sock *fastopen_rsk;
 };
 
 enum tsq_flags {
@@ -552,6 +563,34 @@ static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
 	return (struct tcp_timewait_sock *)sk;
 }
 
+static inline bool tcp_passive_fastopen(const struct sock *sk)
+{
+	return (sk->sk_state == TCP_SYN_RECV &&
+		tcp_sk(sk)->fastopen_rsk != NULL);
+}
+
+static inline bool fastopen_cookie_present(struct tcp_fastopen_cookie *foc)
+{
+	return foc->len != -1;
+}
+
+static inline int fastopen_init_queue(struct sock *sk, int backlog)
+{
+	struct request_sock_queue *queue =
+			&inet_csk(sk)->icsk_accept_queue;
+
+	if (queue->fastopenq == NULL) {
+		queue->fastopenq = kzalloc(
+				sizeof(struct fastopen_queue),
+				sk->sk_allocation);
+		if (queue->fastopenq == NULL)
+			return -ENOMEM;
+		spin_lock_init(&queue->fastopenq->lock);
+	}
+	queue->fastopenq->max_qlen = backlog;
+	return 0;
+}
+
 #endif	/* __KERNEL__ */
 
 #endif	/* _LINUX_TCP_H */
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 4c0766e..c3cdd6c 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -106,6 +106,34 @@ struct listen_sock {
 	struct request_sock	*syn_table[0];
 };
 
+/*
+ * For a TCP Fast Open listener -
+ *	lock - protects the access to all the reqsk, which is co-owned by
+ *		the listener and the child socket.
+ *	qlen - pending TFO requests (still in TCP_SYN_RECV).
+ *	max_qlen - max TFO reqs allowed before TFO is disabled.
+ *
+ *	XXX (TFO) - ideally these fields can be made as part of "listen_sock"
+ *	structure above. But there is some implementation difficulty due to
+ *	listen_sock being part of request_sock_queue hence will be freed when
+ *	a listener is stopped. But TFO related fields may continue to be
+ *	accessed even after a listener is closed, until its sk_refcnt drops
+ *	to 0 implying no more outstanding TFO reqs. One solution is to keep
+ *	listen_opt around until sk_refcnt drops to 0. But there is some other
+ *	complexity that needs to be resolved. E.g., a listener can be disabled
+ *	temporarily through shutdown()->tcp_disconnect(), and re-enabled later.
+ */
+struct fastopen_queue {
+	struct request_sock	*rskq_rst_head; /* Keep track of past TFO */
+	struct request_sock	*rskq_rst_tail; /* requests that caused RST.
+						 * This is part of the defense
+						 * against spoofing attack.
+						 */
+	spinlock_t	lock;
+	int		qlen;		/* # of pending (TCP_SYN_RECV) reqs */
+	int		max_qlen;	/* != 0 iff TFO is currently enabled */
+};
+
 /** struct request_sock_queue - queue of request_socks
  *
  * @rskq_accept_head - FIFO head of established children
@@ -129,6 +157,12 @@ struct request_sock_queue {
 	u8			rskq_defer_accept;
 	/* 3 bytes hole, try to pack */
 	struct listen_sock	*listen_opt;
+	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
+					     * enabled on this listener. Check
+					     * max_qlen != 0 in fastopen_queue
+					     * to determine if TFO is enabled
+					     * right at this moment.
+					     */
 };
 
 extern int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -136,6 +170,8 @@ extern int reqsk_queue_alloc(struct request_sock_queue *queue,
 
 extern void __reqsk_queue_destroy(struct request_sock_queue *queue);
 extern void reqsk_queue_destroy(struct request_sock_queue *queue);
+extern void reqsk_fastopen_remove(struct sock *sk,
+				  struct request_sock *req, bool reset);
 
 static inline struct request_sock *
 	reqsk_queue_yank_acceptq(struct request_sock_queue *queue)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0fca06f..9f8821e 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -224,8 +224,24 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
 
 /* Bit Flags for sysctl_tcp_fastopen */
 #define	TFO_CLIENT_ENABLE	1
+#define	TFO_SERVER_ENABLE	2
 #define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */
 
+/* Process SYN data but skip cookie validation */
+#define	TFO_SERVER_COOKIE_NOT_CHKED	0x100
+/* Accept SYN data w/o any cookie option */
+#define	TFO_SERVER_COOKIE_NOT_REQD	0x200
+
+/* Force enable TFO on all listeners, i.e., not requiring the
+ * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
+ */
+#define	TFO_SERVER_WO_SOCKOPT1	0x400
+#define	TFO_SERVER_WO_SOCKOPT2	0x800
+/* Always create TFO child sockets on a TFO listener even when
+ * cookie/data not present. (For testing purpose!)
+ */
+#define	TFO_SERVER_ALWAYS	0x1000
+
 extern struct inet_timewait_death_row tcp_death_row;
 
 /* sysctl variables for tcp */
@@ -421,12 +437,6 @@ extern void tcp_metrics_init(void);
 extern bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check);
 extern bool tcp_remember_stamp(struct sock *sk);
 extern bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
-extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-				   struct tcp_fastopen_cookie *cookie,
-				   int *syn_loss, unsigned long *last_syn_loss);
-extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
-				   struct tcp_fastopen_cookie *cookie,
-				   bool syn_lost);
 extern void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
 extern void tcp_disable_fack(struct tcp_sock *tp);
 extern void tcp_close(struct sock *sk, long timeout);
@@ -537,6 +547,7 @@ extern void tcp_send_delayed_ack(struct sock *sk);
 extern void tcp_cwnd_application_limited(struct sock *sk);
 extern void tcp_resume_early_retransmit(struct sock *sk);
 extern void tcp_rearm_rto(struct sock *sk);
+extern void tcp_reset(struct sock *sk);
 
 /* tcp_timer.c */
 extern void tcp_init_xmit_timers(struct sock *);
@@ -586,6 +597,7 @@ extern int tcp_mtu_to_mss(struct sock *sk, int pmtu);
 extern int tcp_mss_to_mtu(struct sock *sk, int mss);
 extern void tcp_mtup_init(struct sock *sk);
 extern void tcp_valid_rtt_meas(struct sock *sk, u32 seq_rtt);
+extern void tcp_init_buffer_space(struct sock *sk);
 
 static inline void tcp_bound_rto(const struct sock *sk)
 {
@@ -1104,6 +1116,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
 	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
 	req->cookie_ts = 0;
 	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
+	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
 	req->mss = rx_opt->mss_clamp;
 	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
 	ireq->tstamp_ok = rx_opt->tstamp_ok;
@@ -1308,15 +1321,34 @@ extern int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff
 extern int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
 			    const struct tcp_md5sig_key *key);
 
+/* From tcp_fastopen.c */
+extern void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
+				   struct tcp_fastopen_cookie *cookie,
+				   int *syn_loss, unsigned long *last_syn_loss);
+extern void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
+				   struct tcp_fastopen_cookie *cookie,
+				   bool syn_lost);
 struct tcp_fastopen_request {
 	/* Fast Open cookie. Size 0 means a cookie request */
 	struct tcp_fastopen_cookie	cookie;
 	struct msghdr			*data;  /* data in MSG_FASTOPEN */
 	u16				copied;	/* queued in tcp_connect() */
 };
-
 void tcp_free_fastopen_req(struct tcp_sock *tp);
 
+extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
+int tcp_fastopen_reset_cipher(void *key, unsigned int len);
+void tcp_fastopen_cookie_gen(__be32 addr, struct tcp_fastopen_cookie *foc);
+
+#define TCP_FASTOPEN_KEY_LENGTH 16
+
+/* Fastopen key context */
+struct tcp_fastopen_context {
+	struct crypto_cipher __rcu	*tfm;
+	__u8				key[TCP_FASTOPEN_KEY_LENGTH];
+	struct rcu_head			rcu;
+};
+
 /* write queue abstraction */
 static inline void tcp_write_queue_purge(struct sock *sk)
 {
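
For orientation, a hypothetical helper (not part of this diff; the name
tfo_listener_enabled and the exact checks are assumptions for
illustration) showing how the new TFO_SERVER_* sysctl bits and the
per-listener fastopen_queue added above are meant to combine:

/* Illustrative only, built on the definitions added above: a listener has
 * TFO active when the server side is enabled in sysctl_tcp_fastopen and
 * either a TFO_SERVER_WO_SOCKOPT* bit force-enables it or the application
 * opted in via TCP_FASTOPEN (fastopenq allocated with max_qlen != 0).
 */
static inline bool tfo_listener_enabled(int sysctl_val,
					const struct fastopen_queue *fq)
{
	if (!(sysctl_val & TFO_SERVER_ENABLE))
		return false;
	if (sysctl_val & (TFO_SERVER_WO_SOCKOPT1 | TFO_SERVER_WO_SOCKOPT2))
		return true;
	return fq != NULL && fq->max_qlen != 0;
}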