tcp.h

linux

5.5.9

Brick

Function Name
tcp_under_memory_pressureoptimized version of sk_under_memory_pressure() for TCP sockets
beforeThe next routines deal with comparing 32 bit unsigned ints * and worry about wraparound (automatic with unsigned arithmetic).
betweenis s2<=s1<=s3 ?
tcp_out_of_memorystatic inline bool tcp_out_of_memory(struct sock *sk)
tcp_too_many_orphansstatic inline bool tcp_too_many_orphans(struct sock *sk, int shift)
tcp_dec_quickack_modestatic inline void tcp_dec_quickack_mode(struct sock *sk, const unsigned int pkts)
tcp_clear_xmit_timersstatic inline void tcp_clear_xmit_timers(struct sock *sk)
tcp_bound_to_half_wndBound MSS / TSO packet size with the half of the window
tcp_bound_rtostatic inline void tcp_bound_rto(const struct sock *sk)
__tcp_set_rtostatic inline u32 __tcp_set_rto(const struct tcp_sock *tp)
__tcp_fast_path_onstatic inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
tcp_fast_path_onstatic inline void tcp_fast_path_on(struct tcp_sock *tp)
tcp_fast_path_checkstatic inline void tcp_fast_path_check(struct sock *sk)
tcp_rto_minCompute the actual rto_min value
tcp_rto_min_usstatic inline u32 tcp_rto_min_us(struct sock *sk)
tcp_ca_dst_lockedstatic inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
tcp_min_rttMinimum RTT in usec
tcp_receive_windowCompute the actual receive window we are currently advertising
tcp_clock_nsstatic inline u64 tcp_clock_ns(void)
tcp_clock_usstatic inline u64 tcp_clock_us(void)
tcp_time_stampThis should only be used in contexts where tp->tcp_mstamp is up to date
tcp_ns_to_tsConvert a nsec timestamp into TCP TSval timestamp (ms based currently)
tcp_time_stamp_rawCould use tcp_clock_us() / 1000, but this version uses a single divide
tcp_stamp_us_deltastatic inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
tcp_skb_timestampstatic inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
tcp_skb_timestamp_usprovide the departure time in us unit
bpf_compute_data_end_sk_skbstatic inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
tcp_skb_bpf_ingressstatic inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb)
tcp_skb_bpf_redirect_fetchstatic inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb)
tcp_skb_bpf_redirect_clearstatic inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb)
tcp_v6_iifThis is the variant of inet6_iif() that must be used by TCP, * as TCP moves IP6CB into a different location in skb->cb[]
tcp_v6_iif_l3_slavestatic inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
tcp_v6_sdifTCP_SKB_CB reference means this can not be used from early demux
inet_exact_dif_matchstatic inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
tcp_v4_sdifTCP_SKB_CB reference means this can not be used from early demux
tcp_skb_pcountDue to TSO, an SKB can be composed of multiple actual * packets
tcp_skb_pcount_setstatic inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
tcp_skb_pcount_addstatic inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
tcp_skb_mssThis is valid iff skb is in write queue and tcp_skb_pcount() > 1
tcp_skb_can_collapse_tostatic inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
tcp_ca_get_name_by_keystatic inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
tcp_ca_needs_ecnstatic inline bool tcp_ca_needs_ecn(const struct sock *sk)
tcp_set_ca_statestatic inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
tcp_ca_eventstatic inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
tcp_is_sackThese functions determine how the current flow behaves in respect of SACK * handling. SACK is negotiated with the peer, and therefore it can vary * between different flows
tcp_is_renostatic inline bool tcp_is_reno(const struct tcp_sock *tp)
tcp_left_outstatic inline unsigned int tcp_left_out(const struct tcp_sock *tp)
tcp_packets_in_flightThis determines how many packets are "in the network" to the best * of our knowledge. In many cases it is conservative, but where * detailed information is available from the receiver (via SACK * blocks etc.) we can make more aggressive calculations
tcp_in_slow_startstatic inline bool tcp_in_slow_start(const struct tcp_sock *tp)
tcp_in_initial_slowstartstatic inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
tcp_in_cwnd_reductionstatic inline bool tcp_in_cwnd_reduction(const struct sock *sk)
tcp_current_ssthreshIf cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd
tcp_max_tso_deferred_mssThe maximum number of MSS of available cwnd for which TSO defers * sending if not using sysctl_tcp_tso_win_divisor.
tcp_wnd_endReturns end sequence number of the receiver's advertised window
tcp_is_cwnd_limitedWe follow the spirit of RFC2861 to validate cwnd but implement a more * flexible approach. The RFC suggests cwnd should not be raised unless * it was fully used previously. And that's exactly what we do in * congestion avoidance mode. But in slow start we allow cwnd to grow * as long as the application has used half the cwnd
tcp_needs_internal_pacingBBR congestion control needs pacing
tcp_pacing_delayReturn in jiffies the delay before one skb is sent
tcp_reset_xmit_timerstatic inline void tcp_reset_xmit_timer(struct sock *sk, const int what, unsigned long when, const unsigned long max_when, const struct sk_buff *skb)
tcp_probe0_baseSomething is really bad, we could not queue an additional packet, * because qdisc is full or receiver sent a 0 window, or we are paced
tcp_probe0_whenVariant of inet_csk_rto_backoff() used for zero window probes
tcp_check_probe_timerstatic inline void tcp_check_probe_timer(struct sock *sk)
tcp_init_wlstatic inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
tcp_update_wlstatic inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
tcp_v4_checkCalculate(/check) TCP checksum
tcp_checksum_completestatic inline bool tcp_checksum_complete(struct sk_buff *skb)
tcp_sack_resetstatic inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
tcp_slow_start_after_idle_checkstatic inline void tcp_slow_start_after_idle_check(struct sock *sk)
tcp_win_from_spacestatic inline int tcp_win_from_space(const struct sock *sk, int space)
tcp_spacecaller must be prepared to deal with negative returns
tcp_full_spacestatic inline int tcp_full_space(const struct sock *sk)
keepalive_intvl_whenstatic inline int keepalive_intvl_when(const struct tcp_sock *tp)
keepalive_time_whenstatic inline int keepalive_time_when(const struct tcp_sock *tp)
keepalive_probesstatic inline int keepalive_probes(const struct tcp_sock *tp)
keepalive_time_elapsedstatic inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
tcp_fin_timestatic inline int tcp_fin_time(const struct sock *sk)
tcp_paws_checkstatic inline bool tcp_paws_check(const struct tcp_options_received *rx_opt, int paws_win)
tcp_paws_rejectstatic inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt, int rst)
tcp_mib_initstatic inline void tcp_mib_init(struct net *net)
tcp_clear_retrans_hints_partialfrom STCP
tcp_clear_all_retrans_hintsstatic inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
tcp_md5_do_lookupstatic inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, const union tcp_md5_addr *addr, int family)
tcp_put_md5sig_poolstatic inline void tcp_put_md5sig_pool(void)
tcp_fastopen_get_ctxCaller needs to wrap with rcu_read_(un)lock()
tcp_fastopen_cookie_matchstatic inline bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc, const struct tcp_fastopen_cookie *orig)
tcp_fastopen_context_lenstatic inline int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx)
tcp_skb_tsorted_anchor_cleanupThis helper is needed, because skb->tcp_tsorted_anchor uses * the same memory storage than skb->destructor/_skb_refdst
tcp_rtx_queue_headstatic inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk)
tcp_rtx_queue_tailstatic inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk)
tcp_write_queue_headstatic inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
tcp_write_queue_tailstatic inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
tcp_send_headstatic inline struct sk_buff *tcp_send_head(const struct sock *sk)
tcp_skb_is_laststatic inline bool tcp_skb_is_last(const struct sock *sk, const struct sk_buff *skb)
tcp_write_queue_emptytest if any payload (or FIN) is available in write queue
tcp_rtx_queue_emptystatic inline bool tcp_rtx_queue_empty(const struct sock *sk)
tcp_rtx_and_write_queues_emptystatic inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk)
tcp_add_write_queue_tailstatic inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
tcp_insert_write_queue_beforeInsert new before skb on the write queue of sk
tcp_unlink_write_queuestatic inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
tcp_rtx_queue_unlinkstatic inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk)
tcp_rtx_queue_unlink_and_freestatic inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk)
tcp_push_pending_framesstatic inline void tcp_push_pending_frames(struct sock *sk)
tcp_highest_sack_seqStart sequence of the skb just after the highest skb with SACKed * bit, valid only if sacked_out > 0 or when the caller has ensured * validity by itself.
tcp_advance_highest_sackstatic inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
tcp_highest_sackstatic inline struct sk_buff *tcp_highest_sack(struct sock *sk)
tcp_highest_sack_resetstatic inline void tcp_highest_sack_reset(struct sock *sk)
tcp_highest_sack_replaceCalled when old skb is about to be deleted and replaced by new skb
inet_sk_transparentThis helper checks if socket has IP_TRANSPARENT set
tcp_stream_is_thinDetermines whether this is a thin stream (which may suffer from * increased latency)
tcp_notsent_lowatstatic inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
tcp_stream_memory_free@wake is one when sk_stream_write_space() calls us
cookie_init_sequencestatic inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, const struct sock *sk, struct sk_buff *skb, __u16 *mss)
tcp_rto_delta_usAt how many usecs into the future should the RTO fire?
tcp_v4_save_optionsSave and compile IPv4 options, return a pointer to it
skb_is_tcp_pure_acklocally generated TCP pure ACKs have skb->truesize == 2 * (check tcp_send_ack() in net/ipv4/tcp_output.c ) * This is much faster than dissecting the packet to find out
skb_set_tcp_pure_ackstatic inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
tcp_inqstatic inline int tcp_inq(struct sock *sk)
tcp_segs_instatic inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
tcp_listendropTCP listen path runs lockless
tcp_call_bpfstatic inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
tcp_call_bpf_2argstatic inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
tcp_call_bpf_3argstatic inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, u32 arg3)
tcp_timeout_initstatic inline u32 tcp_timeout_init(struct sock *sk)
tcp_rwnd_init_bpfstatic inline u32 tcp_rwnd_init_bpf(struct sock *sk)
tcp_bpf_ca_needs_ecnstatic inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
tcp_bpf_rttstatic inline void tcp_bpf_rtt(struct sock *sk)
tcp_add_tx_delaystatic inline void tcp_add_tx_delay(struct sk_buff *skb, const struct tcp_sock *tp)
tcp_transmit_timeCompute Earliest Departure Time for some control packets * like ACK or RST for TIME_WAIT or non ESTABLISHED sockets.