Android - porting MPTCP to android-x86-6.0-rc1 (some of the modified files may still have issues)

++++++++++++++++++++++++++++++++++++++++++++++++++++

./include/net/tcp.h

//__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
__u32 cookie_v4_init_sequence(struct request_sock *req, struct sock *sk,
			      const struct sk_buff *skb, __u16 *mss);

//__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
__u32 cookie_v6_init_sequence(struct request_sock *req, struct sock *sk,
			      const struct sk_buff *skb, __u16 *mss);

	//void (*init_req)(struct request_sock *req,
	//		 const struct sock *sk_listener,
	//		 struct sk_buff *skb);
	int (*init_req)(struct request_sock *req, struct sock *sk_listener,
			 struct sk_buff *skb, bool want_cookie);

	//__u32 (*cookie_init_seq)(const struct sk_buff *skb,
	//			 __u16 *mss);
	__u32 (*cookie_init_seq)(struct request_sock *req, struct sock *sk,
				 const struct sk_buff *skb, __u16 *mss);

#ifdef CONFIG_SYN_COOKIES
//static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
//					 const struct sock *sk, struct sk_buff *skb,
//					 __u16 *mss)
static inline __u32 cookie_init_sequence(struct request_sock *req,
					 const struct tcp_request_sock_ops *ops,
					 struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	tcp_synq_overflow(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
	//return ops->cookie_init_seq(skb, mss);
	return ops->cookie_init_seq(req, sk, skb, mss);
}
#else
//static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
//					 const struct sock *sk, struct sk_buff *skb,
//					 __u16 *mss)
static inline __u32 cookie_init_sequence(struct request_sock *req,
					 const struct tcp_request_sock_ops *ops,
					 struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)

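The body of the !CONFIG_SYN_COOKIES stub is cut off above. Assuming it mirrors the mainline stub, only the signature changes and the body stays a plain "return 0":

{
	return 0;
}
#endif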
++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/inet_connection_sock.c

// newly added function
u32 inet_synq_hash(const __be32 raddr, const __be16 rport, const u32 rnd,
		   const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}
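The trailing "& (synq_hsize - 1)" only behaves like a modulo because the syn-queue size is a power of two (reqsk_queue_alloc() normally rounds the table size up to one). A minimal standalone sketch of that masking trick, with the jhash_2words() result replaced by an arbitrary value:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t h = 0xdeadbeefu;	/* stand-in for a jhash_2words() result */
	uint32_t synq_hsize = 512;	/* must be a power of two */

	/* for unsigned values and power-of-two sizes, h & (size - 1) == h % size */
	assert((h & (synq_hsize - 1)) == (h % synq_hsize));
	return 0;
}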

/*
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	reqsk_queue_hash_req(req, timeout);
	inet_csk_reqsk_queue_added(sk);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
*/
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
				     inet_rsk(req)->ir_rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

	//if (sk_state_load(sk_listener) != TCP_LISTEN)
	if (sk_state_load(sk_listener) != TCP_LISTEN && !is_meta_sk(sk_listener))

	//reqsk_queue_alloc(&icsk->icsk_accept_queue);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, GFP_KERNEL);
	if (rc != 0)
		return rc;

++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/syncookies.c

//__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mssp)
__u32 cookie_v4_init_sequence(struct request_sock *req, const struct sk_buff *skb, __u16 *mssp)

	//tcp_parse_options(skb, &tcp_opt, 0, NULL);
	mptcp_init_mp_opt(&mopt);
	tcp_parse_options(skb, &tcp_opt, &mopt, 0, NULL, NULL);

	//req = inet_reqsk_alloc(&tcp_request_sock_ops, sk, false); /* for safety */
#ifdef CONFIG_MPTCP
	if (mopt.saw_mpc)
		req = inet_reqsk_alloc(&mptcp_request_sock_ops, sk, false); /* for safety */
	else
#endif
		req = inet_reqsk_alloc(&tcp_request_sock_ops, sk, false); /* for safety */


// newly added; possibly removable?
	treq->snt_synack	= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;

	if (mopt.saw_mpc)
		mptcp_cookies_reqsk_init(req, &mopt, skb);

	//tcp_select_initial_window(tcp_full_space(sk), req->mss,
	//			  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
	//			  ireq->wscale_ok, &rcv_wscale,
	//			  dst_metric(&rt->dst, RTAX_INITRWND));
	tp->ops->select_initial_window(tcp_full_space(sk), req->mss,
				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
				  ireq->wscale_ok, &rcv_wscale,
				  dst_metric(&rt->dst, RTAX_INITRWND), sk);

++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp.c

			//__u32 new_window = __tcp_select_window(sk);
			__u32 new_window = tp->ops->__select_window(sk);

		//tcp_cleanup_rbuf(sk, copied);
		tp->ops->cleanup_rbuf(sk, copied);

		//tcp_cleanup_rbuf(sk, copied);
		tp->ops->cleanup_rbuf(sk, copied);

	//tcp_cleanup_rbuf(sk, copied);
	tp->ops->cleanup_rbuf(sk, copied);

			//tcp_send_fin(sk);
			tcp_sk(sk)->ops->send_fin(sk);

/* These states need RST on ABORT according to RFC793 */
/*
static inline bool tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}
*/

	/* Likewise, the remaining tcp_*(sk, copied) style calls in this file are
	 * replaced with the corresponding tp->ops->* hooks.
	 */
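The pattern repeated throughout this file (and the rest of the port) is that direct tcp_*() calls become indirect calls through a per-socket ops table, so MPTCP can install its own implementations on the meta-socket. A stripped-down standalone sketch of the idea (struct and function names are illustrative, not the real tcp_sock_ops layout):

#include <stdio.h>

struct sock;	/* forward declaration so the ops table can reference it */

struct tcp_sock_ops {
	void (*cleanup_rbuf)(struct sock *sk, int copied);
};

struct sock {
	const struct tcp_sock_ops *ops;
	const char *name;
};

static void tcp_cleanup_rbuf_plain(struct sock *sk, int copied)
{
	printf("%s: plain TCP cleanup, copied=%d\n", sk->name, copied);
}

static void mptcp_cleanup_rbuf_meta(struct sock *sk, int copied)
{
	printf("%s: MPTCP-aware cleanup, copied=%d\n", sk->name, copied);
}

static const struct tcp_sock_ops tcp_specific   = { tcp_cleanup_rbuf_plain };
static const struct tcp_sock_ops mptcp_specific = { mptcp_cleanup_rbuf_meta };

int main(void)
{
	struct sock plain = { &tcp_specific,   "subflow" };
	struct sock meta  = { &mptcp_specific, "meta" };

	/* what used to be tcp_cleanup_rbuf(sk, copied) becomes: */
	plain.ops->cleanup_rbuf(&plain, 100);
	meta.ops->cleanup_rbuf(&meta, 100);
	return 0;
}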

++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp_fastopen.c

	//atomic_set(&req->rsk_refcnt, 2);
	atomic_set(&req->rsk_refcnt, 1);
	/* Add the child socket directly into the accept queue */
	inet_csk_reqsk_queue_add(sk, req, child);

	/* Now finish processing the fastopen child socket. */
	//inet_csk(child)->icsk_af_ops->rebuild_header(child);
	//tcp_init_congestion_control(child);
	//tcp_mtup_init(child);
	//tcp_init_metrics(child);
	//tcp_init_buffer_space(child);

//static bool tcp_fastopen_create_child(struct sock *sk,
static struct sock *tcp_fastopen_create_child(struct sock *sk,

// newly added
	meta_sk = child;
	if (!mptcp_check_req_fastopen(meta_sk, req)) {
		child = tcp_sk(meta_sk)->mpcb->master_sk;
		tp = tcp_sk(child);
	}
	/* Now finish processing the fastopen child socket. */
	inet_csk(child)->icsk_af_ops->rebuild_header(child);
	tcp_init_congestion_control(child);
	tcp_mtup_init(child);
	tcp_init_metrics(child);
	tp->ops->init_buffer_space(child);

	sk->sk_data_ready(sk);
	if (mptcp(tcp_sk(child)))
		bh_unlock_sock(child);
	bh_unlock_sock(meta_sk);
	//sock_put(child);
	WARN_ON(!req->sk);
	//return true;

++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp_input.c

//#define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
//#define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
//#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
//#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
//#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
//#define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
//#define FLAG_ECE		0x40 /* ECE in this ACK				*/
//#define FLAG_LOST_RETRANS	0x80 /* This ACK marks some retransmission lost */
//#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
//#define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked	*/
//#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
//#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
//#define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
//#define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */

//#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)
//#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)
//#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)
//#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)

	/* Check #1 */
	//if (tp->rcv_ssthresh < tp->window_clamp &&
	//    (int)tp->rcv_ssthresh < tcp_space(sk) &&
	if (meta_tp->rcv_ssthresh < meta_tp->window_clamp &&
	    (int)meta_tp->rcv_ssthresh < tcp_space(meta_sk) &&
	    //!sk_under_memory_pressure(sk)) {
	    !tcp_under_memory_pressure(sk)) {

	//tcp_set_rto(sk);
	tp->ops->set_rto(sk);

void tcp_parse_options(const struct sk_buff *skb,
		       //struct tcp_options_received *opt_rx, int estab,
		       //struct tcp_fastopen_cookie *foc)
		       struct tcp_options_received *opt_rx,
		       struct mptcp_options_received *mopt,
		       int estab, struct tcp_fastopen_cookie *foc,
		       struct tcp_sock *tp)

	//tcp_parse_options(skb, &tp->rx_opt, 1, NULL);
	tcp_parse_options(skb, &tp->rx_opt,
			  mptcp(tp) ? &tp->mptcp->rx_opt : NULL, 1, NULL, tp);

		//inet_csk(sk)->icsk_ack.pingpong = 1;
		dst = __sk_dst_get(sk);
		if (!dst || !dst_metric(dst, RTAX_QUICKACK))
			inet_csk(sk)->icsk_ack.pingpong = 1;
		if (mptcp(tp))
			mptcp_sub_close_passive(sk);

		//tcp_time_wait(sk, TCP_TIME_WAIT, 0);
		tp->ops->time_wait(sk, TCP_TIME_WAIT, 0);

//static bool tcp_prune_ofo_queue(struct sock *sk);

		//if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
		/* MPTCP allows non-data data-fin to be in the ofo-queue */
		if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq) &&
		    !(mptcp(tp) && end_seq == seq)) {

	//if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq)
	//	goto drop;
	/* If no data is present, but a data_fin is in the options, we still
	 * have to call mptcp_queue_skb later on. */
	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq &&
	    !(mptcp(tp) && mptcp_is_data_fin(skb)))
		goto drop;

// newly added
/* RFC2861, slow part. Adjust cwnd, after it was not full during one rto.
 * As additional protections, we do not touch cwnd in retransmission phases,
 * and if application hit its sndbuf limit recently.
 */
void tcp_cwnd_application_limited(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
	    sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
		/* Limited by application or receiver window. */
		u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
		u32 win_used = max(tp->snd_cwnd_used, init_win);
		if (win_used < tp->snd_cwnd) {
			tp->snd_ssthresh = tcp_current_ssthresh(sk);
			tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
		}
		tp->snd_cwnd_used = 0;
	}
	tp->snd_cwnd_stamp = tcp_time_stamp;
}
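tcp_cwnd_application_limited() is made non-static here, presumably so MPTCP code can call it. The adjustment itself just halves the gap between the configured cwnd and what was actually used, after remembering the old operating point in snd_ssthresh. A quick worked example of that arithmetic with made-up numbers:

#include <stdio.h>

int main(void)
{
	unsigned int snd_cwnd = 100;	/* configured congestion window */
	unsigned int win_used = 20;	/* what the application actually used */

	if (win_used < snd_cwnd)
		snd_cwnd = (snd_cwnd + win_used) >> 1;	/* (100 + 20) / 2 = 60 */

	printf("new snd_cwnd = %u\n", snd_cwnd);	/* prints 60 */
	return 0;
}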

	     //__tcp_select_window(sk) >= tp->rcv_wnd) ||
	     tp->ops->__select_window(sk) >= tp->rcv_wnd) ||

		//tcp_parse_options(synack, &opt, 0, NULL);
		tcp_parse_options(synack, &opt, NULL, 0, NULL, NULL);

	//tcp_parse_options(skb, &tp->rx_opt, 0, &foc);
	tcp_parse_options(skb, &tp->rx_opt,
			  mptcp(tp) ? &tp->mptcp->rx_opt : &mopt, 0, &foc, tp);

//int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
//			  const struct tcphdr *th, unsigned int len)
//	__releases(&sk->sk_lock.slock)
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
	__releases(&sk->sk_lock.slock)

//		queued = tcp_rcv_synsent_state_process(sk, skb, th, len);
		queued = tcp_rcv_synsent_state_process(sk, skb, th);

	//tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
	tcp_parse_options(skb, &tmp_opt, NULL, 0, want_cookie ? NULL : &foc, NULL);

	//af_ops->init_req(req, sk, skb);
	if (af_ops->init_req(req, sk, skb, want_cookie))
		goto drop_and_free;

		//isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
		isn = cookie_init_sequence(req, af_ops, sk, skb, &req->mss);

++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp_ipv4.c

	//tp = tcp_sk(sk);

			//tcp_retransmit_timer(sk);
			tcp_sk(sk)->ops->retransmit_timer(sk);

//void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)

static void tcp_v4_send_ack(struct net *net,
			    //struct sk_buff *skb, u32 seq, u32 ack,
			    struct sk_buff *skb, u32 seq, u32 ack, u32 data_ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    //int reply_flags, u8 tos)
			    int reply_flags, u8 tos, int mptcp)

//void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
//			   struct request_sock *req)
void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)

	tcp_v4_send_ack(sock_net(sk), skb, seq,
			//tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_rsk(req)->rcv_nxt, 0, req->rsk_rcv_wnd,
			tcp_time_stamp,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			//ip_hdr(skb)->tos);
			ip_hdr(skb)->tos, 0);

//int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
//		       struct flowi *fl,
//		       struct request_sock *req,
//		       u16 queue_mapping,
//		       struct tcp_fastopen_cookie *foc)
int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
				  bool attach_req)

//static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
//			    struct sk_buff *skb)
static int tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
			   struct sk_buff *skb, bool want_cookie)


++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp_ipv4.c (continued)

// newly added
struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)

		//struct sock *nsk = tcp_v4_cookie_check(sk, skb);
		struct sock *nsk = tcp_v4_hnd_req(sk, skb);

	//if (!sk)
	//	goto no_tcp_socket;

	//if (sk->sk_state == TCP_TIME_WAIT)
	if (sk && sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;
	if (!sk)
		goto no_tcp_socket;

	//bh_lock_sock_nested(sk);
	if (mptcp(tcp_sk(sk))) {
		meta_sk = mptcp_meta_sk(sk);

		bh_lock_sock_nested(meta_sk);
		if (sock_owned_by_user(meta_sk))
			skb->sk = sk;
	} else {
		meta_sk = sk;
		bh_lock_sock_nested(sk);
	}
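The locking change above follows MPTCP's rule that a subflow in the receive path is serialized under its meta-socket's lock rather than its own. A toy standalone sketch of that lock-selection choice (the types and helpers here are stand-ins, not kernel APIs):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct meta_sock { pthread_mutex_t lock; };

struct sub_sock {
	struct meta_sock *meta;
	bool is_mptcp;
	pthread_mutex_t lock;
};

/* pick the lock the receive path must hold for this socket */
static pthread_mutex_t *rx_lock(struct sub_sock *sk)
{
	return sk->is_mptcp ? &sk->meta->lock : &sk->lock;
}

int main(void)
{
	struct meta_sock meta = { PTHREAD_MUTEX_INITIALIZER };
	struct sub_sock sub = { &meta, true, PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(rx_lock(&sub));
	printf("receive processing runs under the meta-level lock\n");
	pthread_mutex_unlock(rx_lock(&sub));
	return 0;
}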

//	if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
	if (tcp_checksum_complete(skb)) {


++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp_minisocks.c

		//tcp_parse_options(skb, &tmp_opt, 0, NULL);
		mptcp_init_mp_opt(&mopt);
		tcp_parse_options(skb, &tmp_opt, &mopt, 0, NULL, NULL);

	//tcp_select_initial_window(full_space,
	//	mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
	tp->ops->select_initial_window(tcp_full_space(sk),
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) -
		(ireq->saw_mpc ? MPTCP_SUB_LEN_DSM_ALIGN : 0),

// newly added
	BUG_ON(!mptcp(tcp_sk(sk)) && fastopen == (sk->sk_state == TCP_LISTEN));
	mptcp_init_mp_opt(&mopt);

		//tcp_parse_options(skb, &tmp_opt, 0, NULL);
		tcp_parse_options(skb, &tmp_opt, &mopt, 0, NULL, NULL);


++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp_output.c

//static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
//			   int push_one, gfp_t gfp);

/* Account for new data that has been sent to the network. */
//static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)

	//u32 cur_win = tcp_receive_window(tp);
	//u32 new_win = __tcp_select_window(sk);
	/* The window must never shrink at the meta-level. At the subflow we
	 * have to allow this. Otherwise we may announce a window too large
	 * for the current meta-level sk_rcvbuf.
	 */
	u32 cur_win = tcp_receive_window(mptcp(tp) ? tcp_sk(mptcp_meta_sk(sk)) : tp);
	u32 new_win = tp->ops->__select_window(sk);

static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
			      //struct tcp_out_options *opts)
			      struct tcp_out_options *opts, struct sk_buff *skb)

		//tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
		//	       0, GFP_ATOMIC);
		tcp_sk(sk)->ops->write_xmit(sk, tcp_current_mss(sk),
					    tcp_sk(sk)->nonagle, 0, GFP_ATOMIC);

		//th->window	= htons(tcp_select_window(sk));
		th->window	= htons(tp->ops->select_window(sk));

	//tcp_options_write((__be32 *)(th + 1), tp, &opts);
	tcp_options_write((__be32 *)(th + 1), tp, &opts, skb);

/* Initialize TSO segments for a packet. */
//static void tcp_set_skb_tso_segs(struct sk_buff *skb, unsigned int mss_now)
//{
//	if (skb->len <= mss_now || skb->ip_summed == CHECKSUM_NONE) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
//		tcp_skb_pcount_set(skb, 1);
//		TCP_SKB_CB(skb)->tcp_gso_size = 0;
//	} else {
//		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
//		TCP_SKB_CB(skb)->tcp_gso_size = mss_now;
//	}
//}
void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
			  unsigned int mss_now)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	/* Make sure we own this skb before messing gso_size/gso_segs */
	WARN_ON_ONCE(skb_cloned(skb));

	if (skb->len <= mss_now || (is_meta_sk(sk) && !mptcp_sk_can_gso(sk)) ||
	    (!is_meta_sk(sk) && !sk_can_gso(sk)) || skb->ip_summed == CHECKSUM_NONE) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		tcp_skb_pcount_set(skb, 1);
		shinfo->gso_size = 0;
		shinfo->gso_type = 0;
	} else {
		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
		shinfo->gso_size = mss_now;
		shinfo->gso_type = sk->sk_gso_type;
	}
}
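In the TSO branch of the new helper, tcp_skb_pcount is just a ceiling division of the payload length by the current MSS. A worked example with made-up numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int len = 4000;	/* skb payload length */
	unsigned int mss_now = 1460;	/* current MSS */

	/* 2 * 1460 = 2920 < 4000 <= 4380 = 3 * 1460, so three segments */
	printf("pcount = %u\n", DIV_ROUND_UP(len, mss_now));	/* prints 3 */
	return 0;
}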

/* Initialize TSO state of a skb.
 * This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
/*
static int tcp_init_tso_segs(struct sk_buff *skb, unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
		tcp_set_skb_tso_segs(skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}	*/
int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
		      unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

		//tcp_cwnd_validate(sk, is_cwnd_limited);
		if (tp->ops->cwnd_validate)
			tp->ops->cwnd_validate(sk, is_cwnd_limited);

			//tcp_write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
			tp->ops->write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);

	//tcp_options_write((__be32 *)(th + 1), NULL, &opts);
	//tcp_options_write((__be32 *)(th + 1), tp, &opts, skb);
	tcp_options_write((__be32 *)(th + 1), NULL, &opts, skb);



++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp_timer.c

			//tcp_send_active_reset(sk, GFP_ATOMIC);
			tp->ops->send_active_reset(sk, GFP_ATOMIC);


++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv6/syncookies.c

// not added; the shared tcp_get_cookie_sock() in ./net/ipv4/syncookies.c is used instead
static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
					   struct request_sock *req,
					   struct dst_entry *dst)

//__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mssp)
__u32 cookie_v6_init_sequence(struct request_sock *req, struct sock *sk,
			      const struct sk_buff *skb, __u16 *mssp)

// newly added
	tcp_synq_overflow(sk);
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);

	//tcp_parse_options(skb, &tcp_opt, 0, NULL);
	mptcp_init_mp_opt(&mopt);
	tcp_parse_options(skb, &tcp_opt, &mopt, 0, NULL, NULL);

	//req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk, false);
#ifdef CONFIG_MPTCP
	if (mopt.saw_mpc)
		req = inet_reqsk_alloc(&mptcp6_request_sock_ops, sk, false);
	else
#endif
		req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk, false);

	//tcp_select_initial_window(tcp_full_space(sk), req->mss,
	tp->ops->select_initial_window(tcp_full_space(sk), req->mss,
				  &req->rsk_rcv_wnd, &req->rsk_window_clamp,
				  ireq->wscale_ok, &rcv_wscale,
				  //dst_metric(dst, RTAX_INITRWND));
				  dst_metric(dst, RTAX_INITRWND), sk);


++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv6/tcp_ipv6.c

// deleted
//static void	tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
//static void	tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
//				      struct request_sock *req);
//static int	tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
//static const struct inet_connection_sock_af_ops ipv6_mapped;
//static const struct inet_connection_sock_af_ops ipv6_specific;

	//tp = tcp_sk(sk);

//static void tcp_v6_init_req(struct request_sock *req,
//			    const struct sock *sk_listener,
//			    struct sk_buff *skb)
static int tcp_v6_init_req(struct request_sock *req, const struct sock *sk_listener,
			   struct sk_buff *skb, bool want_cookie)

	//tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, 0, oif, key, 1, 0, 0, 0);

static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    //u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    u32 ack, u32 data_ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    //u32 label)
			    u32 label, int mptcp)
{
	//tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
	//		     tclass, label);
	tcp_v6_send_response(sk, skb, seq, ack, data_ack, win, tsval, tsecr, oif,
			     key, 0, tclass, label, mptcp);

}

	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			//tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
			tcp_rsk(req)->rcv_nxt, 0, req->rsk_rcv_wnd,
			tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
			//0, 0);
			0, 0, 0);

// newly added
struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)

//#ifdef CONFIG_TCP_MD5SIG
	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;
//#endif

//	if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
	if (tcp_checksum_complete(skb)) {



++++++++++++++++++++++++++++++++++++++++++++++++++++
net/core/request_sock.c

//int reqsk_queue_alloc(struct request_sock_queue *queue,
//		      unsigned int nr_table_entries,
//		      gfp_t flags)
void reqsk_queue_alloc(struct request_sock_queue *queue)

++++++++++++++++++++++++++++++++++++++++++++++++++++
include/net/inet6_connection_sock.h

u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
		    const u32 rnd, const u32 synq_hsize);

++++++++++++++++++++++++++++++++++++++++++++++++++++
net/ipv6/inet6_connection_sock.c

// newly added

/*
 * request_sock (formerly open request) hash tables.
 */
u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
		    const u32 rnd, const u32 synq_hsize)

++++++++++++++++++++++++++++++++++++++++++++++++++++
include/linux/skbuff.h

	//char			cb[48] __aligned(8);
	char			cb[56] __aligned(8);
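MPTCP's per-skb TCP control block no longer fits in the default 48-byte skb->cb, hence the bump to 56. The kernel already enforces this kind of fit at build time (tcp_init() checks the size of struct tcp_skb_cb against skb->cb); a standalone sketch of the same idea with made-up structs:

/* stand-ins only; the real structs live in skbuff.h and net/tcp.h */
struct fake_skb {
	char cb[56];			/* was 48 before this change */
};

struct fake_tcp_skb_cb {
	char inet_and_tcp_state[48];	/* roughly what fit in the old cb */
	unsigned int mptcp_extra[2];	/* extra MPTCP per-skb fields (illustrative) */
};

/* compilation fails if the control block outgrows skb->cb */
_Static_assert(sizeof(struct fake_tcp_skb_cb) <= sizeof(((struct fake_skb *)0)->cb),
	       "tcp_skb_cb must fit inside skb->cb");

int main(void)
{
	return 0;
}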

++++++++++++++++++++++++++++++++++++++++++++++++++++


posted @ 2016-09-06 21:37 张同光