++++++++++++++++++++++++++++++++++++++++++++++++++++
./include/net/tcp.h
__u32 cookie_v4_init_sequence(struct request_sock *req, struct sock *sk,
                              const struct sk_buff *skb, __u16 *mss);
__u32 cookie_v6_init_sequence(struct request_sock *req, struct sock *sk,
                              const struct sk_buff *skb, __u16 *mss);

int (*init_req)(struct request_sock *req, struct sock *sk_listener,
                struct sk_buff *skb, bool want_cookie);
__u32 (*cookie_init_seq)(struct request_sock *req, struct sock *sk,
                         const struct sk_buff *skb, __u16 *mss);

#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(struct request_sock *req,
                                         const struct tcp_request_sock_ops *ops,
                                         struct sock *sk, struct sk_buff *skb,
                                         __u16 *mss)
{
        tcp_synq_overflow(sk);
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
        return ops->cookie_init_seq(req, sk, skb, mss);
}
#else
static inline __u32 cookie_init_sequence(struct request_sock *req,
                                         const struct tcp_request_sock_ops *ops,
                                         struct sock *sk, struct sk_buff *skb,
                                         __u16 *mss)
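The interesting part of this hunk is the indirection: the SYN-cookie ISN generator is now a per-family hook (cookie_init_seq) on tcp_request_sock_ops, and the common wrapper does the overflow bookkeeping and the SYNCOOKIESSENT accounting once before dispatching. A minimal userspace sketch of that dispatch pattern; every fake_* name below is a simplified stand-in of mine, not a kernel definition:

/* cookie_dispatch_sketch.c -- dispatch pattern only; all fake_* types are
 * simplified stand-ins, not the kernel definitions. */
#include <stdint.h>
#include <stdio.h>

struct fake_req  { uint16_t mss; };
struct fake_sock { unsigned long syncookies_sent; };

struct fake_rsk_ops {
        uint32_t (*cookie_init_seq)(struct fake_req *req, struct fake_sock *sk,
                                    const void *skb, uint16_t *mss);
};

static uint32_t fake_cookie_v4(struct fake_req *req, struct fake_sock *sk,
                               const void *skb, uint16_t *mss)
{
        (void)req; (void)sk; (void)skb;
        *mss = 1460;                    /* pretend this MSS got encoded */
        return 0x11111111u;             /* stand-in for the real cookie ISN */
}

/* Common wrapper: do the bookkeeping once, then hand off to the family hook. */
static uint32_t fake_cookie_init_sequence(const struct fake_rsk_ops *ops,
                                          struct fake_req *req,
                                          struct fake_sock *sk,
                                          const void *skb, uint16_t *mss)
{
        sk->syncookies_sent++;          /* LINUX_MIB_SYNCOOKIESSENT stand-in */
        return ops->cookie_init_seq(req, sk, skb, mss);
}

int main(void)
{
        struct fake_rsk_ops v4_ops = { .cookie_init_seq = fake_cookie_v4 };
        struct fake_req req = { 0 };
        struct fake_sock sk = { 0 };
        uint16_t mss;
        uint32_t isn = fake_cookie_init_sequence(&v4_ops, &req, &sk, NULL, &mss);

        printf("isn=%08x mss=%u sent=%lu\n", isn, (unsigned)mss, sk.syncookies_sent);
        return 0;
}

Presumably this hook is also what lets MPTCP plug in its own cookie generator through tcp_request_sock_ops without touching the callers.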
++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/inet_connection_sock.c
u32 inet_synq_hash(const __be32 raddr, const __be16 rport, const u32 rnd,
                   const u32 synq_hsize)
{
        return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) &
               (synq_hsize - 1);
}

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
                                   unsigned long timeout)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
        const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr,
                                     inet_rsk(req)->ir_rmt_port,
                                     lopt->hash_rnd, lopt->nr_table_entries);

        reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
        inet_csk_reqsk_queue_added(sk, timeout);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);
if (sk_state_load(sk_listener) != TCP_LISTEN && !is_meta_sk(sk_listener))
int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, GFP_KERNEL);
if (rc != 0)
return rc;
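inet_synq_hash() just mixes the remote address and port with the per-listener random value (lopt->hash_rnd) and masks the result down to the power-of-two SYN-queue size. A compact userspace sketch of the same bucket selection; mix_2words() below is a stand-in mixer, not the kernel's jhash_2words():

/* synq_hash_sketch.c -- mix_2words() is a stand-in, not jhash_2words();
 * the power-of-two masking is the point being illustrated. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mix_2words(uint32_t a, uint32_t b, uint32_t seed)
{
        uint32_t h = a ^ (b * 0x9e3779b9u) ^ seed;

        h ^= h >> 16;
        h *= 0x85ebca6bu;
        h ^= h >> 13;
        return h;
}

/* synq_hsize is a power of two, so "& (size - 1)" is a cheap modulo. */
static uint32_t synq_hash(uint32_t raddr, uint16_t rport, uint32_t rnd,
                          uint32_t synq_hsize)
{
        return mix_2words(raddr, rport, rnd) & (synq_hsize - 1);
}

int main(void)
{
        uint32_t rnd = 0xdeadbeef;      /* per-listener random salt */

        printf("bucket=%u\n", (unsigned)synq_hash(0x0a000001, 443, rnd, 512));
        return 0;
}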
++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/syncookies.c
__u32 cookie_v4_init_sequence(struct request_sock *req, const struct sk_buff *skb, __u16 *mssp)
        mptcp_init_mp_opt(&mopt);
        tcp_parse_options(skb, &tcp_opt, &mopt, 0, NULL, NULL);

#ifdef CONFIG_MPTCP
        if (mopt.saw_mpc)
                req = inet_reqsk_alloc(&mptcp_request_sock_ops, sk, false);
        else
#endif
                req = inet_reqsk_alloc(&tcp_request_sock_ops, sk, false);

        treq->snt_synack = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsecr : 0;
        if (mopt.saw_mpc)
                mptcp_cookies_reqsk_init(req, &mopt, skb);

        tp->ops->select_initial_window(tcp_full_space(sk), req->mss,
                                       &req->rsk_rcv_wnd, &req->rsk_window_clamp,
                                       ireq->wscale_ok, &rcv_wscale,
                                       dst_metric(&rt->dst, RTAX_INITRWND), sk);
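For a cookie-validated SYN the request sock has to be allocated with the right ops table up front: if option parsing saw MP_CAPABLE the request comes from mptcp_request_sock_ops, otherwise from the plain TCP ops. A small sketch of that selection, with simplified stand-in types rather than the kernel structures:

/* ops_choice_sketch.c -- simplified stand-in types, not the kernel structures. */
#include <stdbool.h>
#include <stdio.h>

struct fake_ops  { const char *name; };
struct fake_mopt { bool saw_mpc; };     /* set by option parsing */

static const struct fake_ops fake_tcp_ops   = { "tcp_request_sock_ops" };
#ifdef CONFIG_MPTCP
static const struct fake_ops fake_mptcp_ops = { "mptcp_request_sock_ops" };
#endif

static const struct fake_ops *pick_ops(const struct fake_mopt *mopt)
{
#ifdef CONFIG_MPTCP
        if (mopt->saw_mpc)
                return &fake_mptcp_ops;
#else
        (void)mopt;                     /* MPTCP compiled out */
#endif
        return &fake_tcp_ops;
}

int main(void)
{
        struct fake_mopt mopt = { .saw_mpc = true };

        printf("%s\n", pick_ops(&mopt)->name);
        return 0;
}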
++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp.c
__u32 new_window = tp->ops->__select_window(sk);
tp->ops->cleanup_rbuf(sk, copied);
tp->ops->cleanup_rbuf(sk, copied);
tp->ops->cleanup_rbuf(sk, copied);
tcp_sk(sk)->ops->send_fin(sk);
tp->ops->cleanup_rbuf(sk, copied);
++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp_fastopen.c
atomic_set(&req->rsk_refcnt, 1);
inet_csk_reqsk_queue_add(sk, req, child);
static struct sock *tcp_fastopen_create_child(struct sock *sk,
meta_sk = child;
if (!mptcp_check_req_fastopen(meta_sk, req)) {
child = tcp_sk(meta_sk)->mpcb->master_sk;
tp = tcp_sk(child);
}
inet_csk(child)->icsk_af_ops->rebuild_header(child);
tcp_init_congestion_control(child);
tcp_mtup_init(child);
tcp_init_metrics(child);
tp->ops->init_buffer_space(child);
sk->sk_data_ready(sk);
if (mptcp(tcp_sk(child)))
bh_unlock_sock(child);
bh_unlock_sock(meta_sk);
WARN_ON(!req->sk);
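With MPTCP, the Fast Open child that tcp_fastopen_create_child() hands back may be a meta socket; the per-connection initialization (rebuild_header, congestion control, MTU probing, metrics, buffer space) then has to run on the master subflow underneath it, while the lock is held on the meta. A toy sketch of that "which socket do I initialize" resolution, using made-up trimmed-down structs:

/* fastopen_target_sketch.c -- made-up structs, only the resolution logic. */
#include <stdio.h>

struct fake_sock;

struct fake_mpcb {
        struct fake_sock *master_sk;    /* first subflow of the connection */
};

struct fake_sock {
        int is_meta;                    /* MPTCP meta (connection-level) socket? */
        struct fake_mpcb *mpcb;
};

/* Initialization targets the master subflow when the child is a meta sk. */
static struct fake_sock *init_target(struct fake_sock *child)
{
        if (child->is_meta && child->mpcb && child->mpcb->master_sk)
                return child->mpcb->master_sk;
        return child;
}

int main(void)
{
        struct fake_sock master = { 0, NULL };
        struct fake_mpcb mpcb = { &master };
        struct fake_sock meta = { 1, &mpcb };

        printf("init runs on %s\n",
               init_target(&meta) == &master ? "master subflow" : "child itself");
        return 0;
}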
++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp_input.c
if (meta_tp->rcv_ssthresh < meta_tp->window_clamp &&
(int)meta_tp->rcv_ssthresh < tcp_space(meta_sk) &&
!tcp_under_memory_pressure(sk)) {
tp->ops->set_rto(sk);
void tcp_parse_options(const struct sk_buff *skb,
struct tcp_options_received *opt_rx,
struct mptcp_options_received *mopt,
int estab, struct tcp_fastopen_cookie *foc,
struct tcp_sock *tp)
tcp_parse_options(skb, &tp->rx_opt,
mptcp(tp) ? &tp->mptcp->rx_opt : NULL, 1, NULL, tp);
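tcp_parse_options() grows two arguments here: an optional struct mptcp_options_received that is only filled in when the caller passes a non-NULL pointer (subflows pass &tp->mptcp->rx_opt, plain TCP passes NULL), plus the tcp_sock itself. The optional out-parameter shape is easy to mirror in a sketch; the types and the "parsing" below are simplified placeholders:

/* parse_opts_sketch.c -- toy option walk; each byte stands in for one
 * option kind (8 = timestamp, 30 = MPTCP), no real TLV handling. */
#include <stdbool.h>
#include <stdio.h>

struct fake_tcp_opts   { bool saw_tstamp; };
struct fake_mptcp_opts { bool saw_mpc; };

/* *mopt is only touched when the caller actually asked for MPTCP options. */
static void parse_options(const unsigned char *kinds, int len,
                          struct fake_tcp_opts *opt_rx,
                          struct fake_mptcp_opts *mopt)
{
        for (int i = 0; i < len; i++) {
                if (kinds[i] == 8)
                        opt_rx->saw_tstamp = true;
                if (kinds[i] == 30 && mopt)
                        mopt->saw_mpc = true;
        }
}

int main(void)
{
        const unsigned char wire[] = { 8, 30 };
        struct fake_tcp_opts opt_rx = { false };
        struct fake_mptcp_opts mopt = { false };

        parse_options(wire, 2, &opt_rx, NULL);  /* plain TCP caller */
        parse_options(wire, 2, &opt_rx, &mopt); /* MPTCP subflow caller */
        printf("tstamp=%d mpc=%d\n", opt_rx.saw_tstamp, mopt.saw_mpc);
        return 0;
}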
dst = __sk_dst_get(sk);
if (!dst || !dst_metric(dst, RTAX_QUICKACK))
inet_csk(sk)->icsk_ack.pingpong = 1;
if (mptcp(tp))
mptcp_sub_close_passive(sk);
tp->ops->time_wait(sk, TCP_TIME_WAIT, 0);
if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq) &&
!(mptcp(tp) && end_seq == seq)) {
if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq &&
!(mptcp(tp) && mptcp_is_data_fin(skb)))
goto drop;
void tcp_cwnd_application_limited(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (inet_csk(sk)->icsk_ca_state == TCP_CA_Open &&
            sk->sk_socket && !test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
                u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
                u32 win_used = max(tp->snd_cwnd_used, init_win);

                if (win_used < tp->snd_cwnd) {
                        tp->snd_ssthresh = tcp_current_ssthresh(sk);
                        tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
                }
                tp->snd_cwnd_used = 0;
        }
        tp->snd_cwnd_stamp = tcp_time_stamp;
}
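When the sender has been application-limited, tcp_cwnd_application_limited() pulls snd_cwnd halfway back toward the window that was actually used (never below the initial window), after remembering ssthresh. The arithmetic on its own, as a tiny sketch:

/* app_limited_sketch.c -- just the cwnd arithmetic from above; the
 * ssthresh bookkeeping and CA-state checks are omitted. */
#include <stdint.h>
#include <stdio.h>

static uint32_t app_limited_cwnd(uint32_t snd_cwnd, uint32_t snd_cwnd_used,
                                 uint32_t init_win)
{
        uint32_t win_used = snd_cwnd_used > init_win ? snd_cwnd_used : init_win;

        if (win_used < snd_cwnd)
                snd_cwnd = (snd_cwnd + win_used) >> 1;  /* move halfway down */
        return snd_cwnd;
}

int main(void)
{
        /* cwnd 100, only 20 segments ever in flight, initial window 10 -> 60 */
        printf("new cwnd = %u\n", (unsigned)app_limited_cwnd(100, 20, 10));
        return 0;
}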
tp->ops->__select_window(sk) >= tp->rcv_wnd) ||
tcp_parse_options(synack, &opt, NULL, 0, NULL, NULL);
tcp_parse_options(skb, &tp->rx_opt,
mptcp(tp) ? &tp->mptcp->rx_opt : &mopt, 0, &foc, tp);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
__releases(&sk->sk_lock.slock)
queued = tcp_rcv_synsent_state_process(sk, skb, th);
tcp_parse_options(skb, &tmp_opt, NULL, 0, want_cookie ? NULL : &foc, NULL);
if (af_ops->init_req(req, sk, skb, want_cookie))
goto drop_and_free;
isn = cookie_init_sequence(req, af_ops, sk, skb, &req->mss);
++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp_ipv4.c
tcp_sk(sk)->ops->retransmit_timer(sk);
void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
static void tcp_v4_send_ack(struct net *net,
                            struct sk_buff *skb, u32 seq, u32 ack, u32 data_ack,
                            u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key,
                            int reply_flags, u8 tos, int mptcp)

void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req)

        tcp_v4_send_ack(sock_net(sk), skb, seq,
                        tcp_rsk(req)->rcv_nxt, 0, req->rsk_rcv_wnd,
                        tcp_time_stamp,
                        req->ts_recent,
                        0,
                        tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
                                          AF_INET),
                        inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        ip_hdr(skb)->tos, 0);
int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
bool attach_req)
-static void tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
- struct sk_buff *skb)
+static int tcp_v4_init_req(struct request_sock *req, struct sock *sk_listener,
+ struct sk_buff *skb, bool want_cookie)
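The diff above is the key API change in this file: tcp_v4_init_req() used to return void, but with MPTCP (and the new want_cookie flag) it can now refuse a request, and tcp_conn_request() reacts with goto drop_and_free, as in the tcp_input.c hunk earlier. A hedged sketch of that caller-side pattern with stand-in types:

/* init_req_sketch.c -- stand-in types; only the "hook may now fail" shape. */
#include <stdbool.h>
#include <stdio.h>

struct fake_req  { int initialized; };
struct fake_sock { int dummy; };

struct fake_af_ops {
        /* Nonzero return means "drop this request". */
        int (*init_req)(struct fake_req *req, struct fake_sock *sk_listener,
                        const void *skb, bool want_cookie);
};

static int fake_v4_init_req(struct fake_req *req, struct fake_sock *sk_listener,
                            const void *skb, bool want_cookie)
{
        (void)sk_listener; (void)skb; (void)want_cookie;
        req->initialized = 1;
        return 0;                       /* 0 = success, as in the new signature */
}

static int handle_syn(const struct fake_af_ops *af_ops, struct fake_sock *sk,
                      const void *skb, bool want_cookie)
{
        struct fake_req req = { 0 };

        if (af_ops->init_req(&req, sk, skb, want_cookie))
                goto drop_and_free;     /* mirrors the tcp_conn_request() path */
        return 0;

drop_and_free:
        return -1;
}

int main(void)
{
        struct fake_af_ops ops = { .init_req = fake_v4_init_req };
        struct fake_sock sk = { 0 };

        printf("%s\n", handle_syn(&ops, &sk, NULL, false) ? "dropped" : "accepted");
        return 0;
}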
++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp_ipv4.c
struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
struct sock *nsk = tcp_v4_hnd_req(sk, skb);
if (sk && sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
if (!sk)
goto no_tcp_socket;
if (mptcp(tcp_sk(sk))) {
meta_sk = mptcp_meta_sk(sk);
bh_lock_sock_nested(meta_sk);
if (sock_owned_by_user(meta_sk))
skb->sk = sk;
} else {
meta_sk = sk;
bh_lock_sock_nested(sk);
}
if (tcp_checksum_complete(skb)) {
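The receive path above picks its lock based on whether the socket is an MPTCP subflow: subflows are serialized on the meta socket's lock, plain sockets on their own. A userspace stand-in for that choice, with pthread mutexes in place of bh_lock_sock_nested() and made-up structs:

/* meta_lock_sketch.c -- made-up structs; only the lock-target choice. */
#include <pthread.h>
#include <stdio.h>

struct fake_sock {
        pthread_mutex_t lock;
        struct fake_sock *meta;         /* NULL for a plain TCP socket */
};

static struct fake_sock *lock_target(struct fake_sock *sk)
{
        return sk->meta ? sk->meta : sk;
}

int main(void)
{
        struct fake_sock meta = { PTHREAD_MUTEX_INITIALIZER, NULL };
        struct fake_sock sub  = { PTHREAD_MUTEX_INITIALIZER, &meta };
        struct fake_sock *target = lock_target(&sub);

        pthread_mutex_lock(&target->lock);
        printf("locked %s\n", target == &meta ? "meta" : "subflow");
        pthread_mutex_unlock(&target->lock);
        return 0;
}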
++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp_minisocks.c
mptcp_init_mp_opt(&mopt);
tcp_parse_options(skb, &tmp_opt, &mopt, 0, NULL, NULL);
tp->ops->select_initial_window(tcp_full_space(sk),
mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) -
(ireq->saw_mpc ? MPTCP_SUB_LEN_DSM_ALIGN : 0),
BUG_ON(!mptcp(tcp_sk(sk)) && fastopen == (sk->sk_state == TCP_LISTEN));
mptcp_init_mp_opt(&mopt);
tcp_parse_options(skb, &tmp_opt, &mopt, 0, NULL, NULL);
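The initial receive window here is sized from the MSS left over after per-segment option overhead: 12 bytes for an aligned timestamp option, plus the MPTCP DSS mapping when the peer negotiated MP_CAPABLE. A numeric sketch; 12 matches TCPOLEN_TSTAMP_ALIGNED, while the 16-byte DSS figure below is only an illustrative assumption, not the fork's real MPTCP_SUB_LEN_DSM_ALIGN value:

/* mss_overhead_sketch.c -- 12 matches TCPOLEN_TSTAMP_ALIGNED; the 16-byte
 * DSS figure is assumed for illustration only. */
#include <stdio.h>

#define FAKE_TSTAMP_ALIGNED     12
#define FAKE_MPTCP_DSS_ALIGNED  16      /* assumed, for illustration only */

static int effective_mss(int mss, int tstamp_ok, int saw_mpc)
{
        return mss - (tstamp_ok ? FAKE_TSTAMP_ALIGNED : 0)
                   - (saw_mpc ? FAKE_MPTCP_DSS_ALIGNED : 0);
}

int main(void)
{
        printf("%d\n", effective_mss(1460, 1, 1));      /* 1432 with both */
        return 0;
}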
++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp_output.c
void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
u32 cur_win = tcp_receive_window(mptcp(tp) ? tcp_sk(mptcp_meta_sk(sk)) : tp);
u32 new_win = tp->ops->__select_window(sk);
static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
struct tcp_out_options *opts, struct sk_buff *skb)
tcp_sk(sk)->ops->write_xmit(sk, tcp_current_mss(sk),
tcp_sk(sk)->nonagle, 0, GFP_ATOMIC);
th->window = htons(tp->ops->select_window(sk));
tcp_options_write((__be32 *)(th + 1), tp, &opts, skb);
void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
                          unsigned int mss_now)
{
        struct skb_shared_info *shinfo = skb_shinfo(skb);

        WARN_ON_ONCE(skb_cloned(skb));

        if (skb->len <= mss_now || (is_meta_sk(sk) && !mptcp_sk_can_gso(sk)) ||
            (!is_meta_sk(sk) && !sk_can_gso(sk)) ||
            skb->ip_summed == CHECKSUM_NONE) {
                tcp_skb_pcount_set(skb, 1);
                shinfo->gso_size = 0;
                shinfo->gso_type = 0;
        } else {
                tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
                shinfo->gso_size = mss_now;
                shinfo->gso_type = sk->sk_gso_type;
        }
}

int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb,
                      unsigned int mss_now)
{
        int tso_segs = tcp_skb_pcount(skb);

        if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) {
                tcp_set_skb_tso_segs(sk, skb, mss_now);
                tso_segs = tcp_skb_pcount(skb);
        }
        return tso_segs;
}
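tcp_set_skb_tso_segs() shows the accounting rule: an skb that cannot (or need not) be GSO'd counts as a single segment, otherwise it counts as ceil(len / mss) and carries gso_size = mss. The arithmetic in isolation:

/* pcount_sketch.c -- the segment accounting rule in isolation. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static unsigned int skb_pcount(unsigned int len, unsigned int mss, int can_gso)
{
        if (len <= mss || !can_gso)
                return 1;               /* one segment, gso_size stays 0 */
        return DIV_ROUND_UP(len, mss);  /* ceil(len / mss), gso_size = mss */
}

int main(void)
{
        printf("%u\n", skb_pcount(4000, 1448, 1));      /* 3 segments */
        return 0;
}

The meta-socket check in the excerpt only changes which GSO capability test applies (mptcp_sk_can_gso() vs sk_can_gso()); the segment arithmetic itself is unchanged.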
if (tp->ops->cwnd_validate)
tp->ops->cwnd_validate(sk, is_cwnd_limited);
tp->ops->write_xmit(sk, mss, TCP_NAGLE_OFF, 2, GFP_ATOMIC);
tcp_options_write((__be32 *)(th + 1), NULL, &opts, skb);
++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv4/tcp_timer.c
tp->ops->send_active_reset(sk, GFP_ATOMIC);
++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv6/syncookies.c
static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
__u32 cookie_v6_init_sequence(struct request_sock *req, struct sock *sk,
const struct sk_buff *skb, __u16 *mssp)
tcp_synq_overflow(sk);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
        mptcp_init_mp_opt(&mopt);
        tcp_parse_options(skb, &tcp_opt, &mopt, 0, NULL, NULL);

#ifdef CONFIG_MPTCP
        if (mopt.saw_mpc)
                req = inet_reqsk_alloc(&mptcp6_request_sock_ops, sk, false);
        else
#endif
                req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk, false);

        tp->ops->select_initial_window(tcp_full_space(sk), req->mss,
                                       &req->rsk_rcv_wnd, &req->rsk_window_clamp,
                                       ireq->wscale_ok, &rcv_wscale,
                                       dst_metric(dst, RTAX_INITRWND), sk);
++++++++++++++++++++++++++++++++++++++++++++++++++++
./net/ipv6/tcp_ipv6.c
static int tcp_v6_init_req(struct request_sock *req, const struct sock *sk_listener,
struct sk_buff *skb, bool want_cookie)
tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, 0, oif, key, 1, 0, 0, 0);
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
                            u32 ack, u32 data_ack, u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key, u8 tclass,
                            u32 label, int mptcp)
{
        tcp_v6_send_response(sk, skb, seq, ack, data_ack, win, tsval, tsecr, oif,
                             key, 0, tclass, label, mptcp);
}
tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
tcp_rsk(req)->rcv_nxt, 0, req->rsk_rcv_wnd,
tcp_time_stamp, req->ts_recent, sk->sk_bound_dev_if,
tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
0, 0, 0);
struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
if (tcp_v6_inbound_md5_hash(sk, skb))
goto discard_and_relse;
if (tcp_checksum_complete(skb)) {
++++++++++++++++++++++++++++++++++++++++++++++++++++
net/core/request_sock.c
void reqsk_queue_alloc(struct request_sock_queue *queue)
++++++++++++++++++++++++++++++++++++++++++++++++++++
include/net/inet6_connection_sock.h
u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
const u32 rnd, const u32 synq_hsize);
++++++++++++++++++++++++++++++++++++++++++++++++++++
net/ipv6/inet6_connection_sock.c
u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
const u32 rnd, const u32 synq_hsize)
++++++++++++++++++++++++++++++++++++++++++++++++++++
include/linux/skbuff.h
char cb[56] __aligned(8);
++++++++++++++++++++++++++++++++++++++++++++++++++++