Function | Description |
tcp_under_memory_pressure | optimized version of sk_under_memory_pressure() for TCP sockets |
before | The next routines deal with comparing 32-bit unsigned ints and worry about wraparound (automatic with unsigned arithmetic); see the sketch after the table |
between | is s2<=s1<=s3 ? |
tcp_out_of_memory | static inline bool tcp_out_of_memory(struct sock *sk) |
tcp_too_many_orphans | static inline bool tcp_too_many_orphans(struct sock *sk, int shift) |
tcp_dec_quickack_mode | static inline void tcp_dec_quickack_mode(struct sock *sk, const unsigned int pkts) |
tcp_clear_xmit_timers | static inline void tcp_clear_xmit_timers(struct sock *sk) |
tcp_bound_to_half_wnd | Bound the MSS / TSO packet size to half of the window |
tcp_bound_rto | static inline void tcp_bound_rto(const struct sock *sk) |
__tcp_set_rto | static inline u32 __tcp_set_rto(const struct tcp_sock *tp) |
__tcp_fast_path_on | static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd) |
tcp_fast_path_on | static inline void tcp_fast_path_on(struct tcp_sock *tp) |
tcp_fast_path_check | static inline void tcp_fast_path_check(struct sock *sk) |
tcp_rto_min | Compute the actual rto_min value |
tcp_rto_min_us | static inline u32 tcp_rto_min_us(struct sock *sk) |
tcp_ca_dst_locked | static inline bool tcp_ca_dst_locked(const struct dst_entry *dst) |
tcp_min_rtt | Minimum RTT in usec |
tcp_receive_window | Compute the actual receive window we are currently advertising (see the sketch after the table) |
tcp_clock_ns | static inline u64 tcp_clock_ns(void) |
tcp_clock_us | static inline u64 tcp_clock_us(void) |
tcp_time_stamp | This should only be used in contexts where tp->tcp_mstamp is up to date |
tcp_ns_to_ts | Convert a nsec timestamp into TCP TSval timestamp (ms based currently) |
tcp_time_stamp_raw | Could use tcp_clock_us() / 1000, but this version uses a single divide (see the sketch after the table) |
tcp_stamp_us_delta | static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0) |
tcp_skb_timestamp | static inline u32 tcp_skb_timestamp(const struct sk_buff *skb) |
tcp_skb_timestamp_us | provide the departure time in us unit |
bpf_compute_data_end_sk_skb | static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb) |
tcp_skb_bpf_ingress | static inline bool tcp_skb_bpf_ingress(const struct sk_buff *skb) |
tcp_skb_bpf_redirect_fetch | static inline struct sock *tcp_skb_bpf_redirect_fetch(struct sk_buff *skb) |
tcp_skb_bpf_redirect_clear | static inline void tcp_skb_bpf_redirect_clear(struct sk_buff *skb) |
tcp_v6_iif | This is the variant of inet6_iif() that must be used by TCP, as TCP moves IP6CB into a different location in skb->cb[] |
tcp_v6_iif_l3_slave | static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb) |
tcp_v6_sdif | TCP_SKB_CB reference means this can not be used from early demux |
inet_exact_dif_match | static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb) |
tcp_v4_sdif | TCP_SKB_CB reference means this can not be used from early demux |
tcp_skb_pcount | Due to TSO, an SKB can be composed of multiple actual packets |
tcp_skb_pcount_set | static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs) |
tcp_skb_pcount_add | static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs) |
tcp_skb_mss | This is valid iff skb is in write queue and tcp_skb_pcount() > 1 |
tcp_skb_can_collapse_to | static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb) |
tcp_ca_get_name_by_key | static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer) |
tcp_ca_needs_ecn | static inline bool tcp_ca_needs_ecn(const struct sock *sk) |
tcp_set_ca_state | static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state) |
tcp_ca_event | static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event) |
tcp_is_sack | These functions determine how the current flow behaves in respect of SACK handling. SACK is negotiated with the peer, and therefore it can vary between different flows |
tcp_is_reno | static inline bool tcp_is_reno(const struct tcp_sock *tp) |
tcp_left_out | static inline unsigned int tcp_left_out(const struct tcp_sock *tp) |
tcp_packets_in_flight | This determines how many packets are "in the network" to the best of our knowledge. In many cases it is conservative, but where detailed information is available from the receiver (via SACK blocks etc.) we can make more aggressive calculations; see the sketch after the table |
tcp_in_slow_start | static inline bool tcp_in_slow_start(const struct tcp_sock *tp) |
tcp_in_initial_slowstart | static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp) |
tcp_in_cwnd_reduction | static inline bool tcp_in_cwnd_reduction(const struct sock *sk) |
tcp_current_ssthresh | If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd (see the sketch after the table) |
tcp_max_tso_deferred_mss | The maximum number of MSS of available cwnd for which TSO defers sending if not using sysctl_tcp_tso_win_divisor |
tcp_wnd_end | Returns end sequence number of the receiver's advertised window |
tcp_is_cwnd_limited | We follow the spirit of RFC2861 to validate cwnd but implement a more flexible approach. The RFC suggests cwnd should not be raised unless it was fully used previously, and that is exactly what we do in congestion avoidance mode. But in slow start we allow cwnd to grow as long as the application has used half the cwnd; see the sketch after the table |
tcp_needs_internal_pacing | BBR congestion control needs pacing |
tcp_pacing_delay | Return in jiffies the delay before one skb is sent |
tcp_reset_xmit_timer | static inline void tcp_reset_xmit_timer(struct sock *sk, const int what, unsigned long when, const unsigned long max_when, const struct sk_buff *skb) |
tcp_probe0_base | Something is really bad: we could not queue an additional packet, because the qdisc is full, the receiver sent a zero window, or we are paced |
tcp_probe0_when | Variant of inet_csk_rto_backoff() used for zero window probes |
tcp_check_probe_timer | static inline void tcp_check_probe_timer(struct sock *sk) |
tcp_init_wl | static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq) |
tcp_update_wl | static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq) |
tcp_v4_check | Calculate (or check) the TCP checksum |
tcp_checksum_complete | static inline bool tcp_checksum_complete(struct sk_buff *skb) |
tcp_sack_reset | static inline void tcp_sack_reset(struct tcp_options_received *rx_opt) |
tcp_slow_start_after_idle_check | static inline void tcp_slow_start_after_idle_check(struct sock *sk) |
tcp_win_from_space | static inline int tcp_win_from_space(const struct sock *sk, int space) |
tcp_space | caller must be prepared to deal with negative returns |
tcp_full_space | static inline int tcp_full_space(const struct sock *sk) |
keepalive_intvl_when | static inline int keepalive_intvl_when(const struct tcp_sock *tp) |
keepalive_time_when | static inline int keepalive_time_when(const struct tcp_sock *tp) |
keepalive_probes | static inline int keepalive_probes(const struct tcp_sock *tp) |
keepalive_time_elapsed | static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) |
tcp_fin_time | static inline int tcp_fin_time(const struct sock *sk) |
tcp_paws_check | static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt, int paws_win) |
tcp_paws_reject | static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt, int rst) |
tcp_mib_init | static inline void tcp_mib_init(struct net *net) |
tcp_clear_retrans_hints_partial | from STCP |
tcp_clear_all_retrans_hints | static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp) |
tcp_md5_do_lookup | static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, const union tcp_md5_addr *addr, int family) |
tcp_put_md5sig_pool | static inline void tcp_put_md5sig_pool(void) |
tcp_fastopen_get_ctx | Caller needs to wrap with rcu_read_(un)lock() |
tcp_fastopen_cookie_match | static inline bool tcp_fastopen_cookie_match(const struct tcp_fastopen_cookie *foc, const struct tcp_fastopen_cookie *orig) |
tcp_fastopen_context_len | static inline int tcp_fastopen_context_len(const struct tcp_fastopen_context *ctx) |
tcp_skb_tsorted_anchor_cleanup | This helper is needed because skb->tcp_tsorted_anchor uses the same memory storage as skb->destructor/_skb_refdst |
tcp_rtx_queue_head | static inline struct sk_buff *tcp_rtx_queue_head(const struct sock *sk) |
tcp_rtx_queue_tail | static inline struct sk_buff *tcp_rtx_queue_tail(const struct sock *sk) |
tcp_write_queue_head | static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk) |
tcp_write_queue_tail | static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk) |
tcp_send_head | static inline struct sk_buff *tcp_send_head(const struct sock *sk) |
tcp_skb_is_last | static inline bool tcp_skb_is_last(const struct sock *sk, const struct sk_buff *skb) |
tcp_write_queue_empty | test if any payload (or FIN) is available in write queue |
tcp_rtx_queue_empty | static inline bool tcp_rtx_queue_empty(const struct sock *sk) |
tcp_rtx_and_write_queues_empty | static inline bool tcp_rtx_and_write_queues_empty(const struct sock *sk) |
tcp_add_write_queue_tail | static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb) |
tcp_insert_write_queue_before | Insert new before skb on the write queue of sk |
tcp_unlink_write_queue | static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk) |
tcp_rtx_queue_unlink | static inline void tcp_rtx_queue_unlink(struct sk_buff *skb, struct sock *sk) |
tcp_rtx_queue_unlink_and_free | static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct sock *sk) |
tcp_push_pending_frames | static inline void tcp_push_pending_frames(struct sock *sk) |
tcp_highest_sack_seq | Start sequence of the skb just after the highest skb with SACKed bit, valid only if sacked_out > 0 or when the caller has ensured validity by itself |
tcp_advance_highest_sack | static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb) |
tcp_highest_sack | static inline struct sk_buff *tcp_highest_sack(struct sock *sk) |
tcp_highest_sack_reset | static inline void tcp_highest_sack_reset(struct sock *sk) |
tcp_highest_sack_replace | Called when old skb is about to be deleted and replaced by new skb |
inet_sk_transparent | This helper checks if socket has IP_TRANSPARENT set |
tcp_stream_is_thin | Determines whether this is a thin stream (which may suffer from increased latency); see the sketch after the table |
tcp_notsent_lowat | static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) |
tcp_stream_memory_free | @wake is one when sk_stream_write_space() calls us |
cookie_init_sequence | static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops, const struct sock *sk, struct sk_buff *skb, __u16 *mss) |
tcp_rto_delta_us | At how many usecs into the future should the RTO fire? |
tcp_v4_save_options | Save and compile IPv4 options, return a pointer to it |
skb_is_tcp_pure_ack | locally generated TCP pure ACKs have skb->truesize == 2 (check tcp_send_ack() in net/ipv4/tcp_output.c). This is much faster than dissecting the packet to find out |
skb_set_tcp_pure_ack | static inline void skb_set_tcp_pure_ack(struct sk_buff *skb) |
tcp_inq | static inline int tcp_inq(struct sock *sk) |
tcp_segs_in | static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb) |
tcp_listendrop | TCP listen path runs lockless |
tcp_call_bpf | static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args) |
tcp_call_bpf_2arg | static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2) |
tcp_call_bpf_3arg | static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2, u32 arg3) |
tcp_timeout_init | static inline u32 tcp_timeout_init(struct sock *sk) |
tcp_rwnd_init_bpf | static inline u32 tcp_rwnd_init_bpf(struct sock *sk) |
tcp_bpf_ca_needs_ecn | static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk) |
tcp_bpf_rtt | static inline void tcp_bpf_rtt(struct sock *sk) |
tcp_add_tx_delay | static inline void tcp_add_tx_delay(struct sk_buff *skb, const struct tcp_sock *tp) |
tcp_transmit_time | Compute Earliest Departure Time for some control packets like ACK or RST for TIME_WAIT or non-ESTABLISHED sockets |
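The sketches below illustrate a few of the entries above. They are minimal user-space approximations written for this page, not the kernel code itself; stand-in types and the small tests in main() are added purely for illustration. First, the wraparound-safe comparisons behind before()/after()/between(): the signed view of an unsigned difference is negative whenever the first sequence number is behind the second, even across a 2^32 wrap.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* "seq1 comes before seq2": the signed view of the unsigned difference is
 * negative whenever seq1 is behind seq2, even across a 2^32 wrap. */
static bool before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

/* is s2 <= s1 <= s3 ?  (all comparisons modulo 2^32) */
static bool between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

int main(void)
{
	/* 0xfffffff0 comes "before" 4 once the sequence space wraps. */
	printf("%d %d\n", before(0xfffffff0u, 4), between(2, 0xfffffff0u, 10));
	return 0;
}
```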
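The timestamp helpers (tcp_ns_to_ts, tcp_time_stamp_raw) convert a nanosecond clock reading into TCP TSval ticks with a single divide instead of first converting to microseconds and dividing again. A hedged sketch of that idea, with the constants spelled out; a TSval tick of one millisecond (TCP_TS_HZ of 1000) is assumed here.

```c
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define TCP_TS_HZ	1000ULL		/* TSval ticks assumed to be milliseconds */

/* One divide: ns / 1,000,000 goes straight from nanoseconds to TSval
 * ticks, instead of ns -> us (divide by 1000) and then us -> ms. */
static uint32_t tcp_ns_to_ts(uint64_t ns)
{
	return (uint32_t)(ns / (NSEC_PER_SEC / TCP_TS_HZ));
}

int main(void)
{
	printf("%u\n", tcp_ns_to_ts(1500000000ULL));	/* 1.5 s -> 1500 ticks */
	return 0;
}
```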
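For tcp_left_out() and tcp_packets_in_flight(), the arithmetic is packets_out - (sacked_out + lost_out) + retrans_out. A sketch using a simplified stand-in for the struct tcp_sock counters (the real helpers operate on struct tcp_sock itself):

```c
#include <stdio.h>

/* Simplified stand-in for the struct tcp_sock counters the helpers read. */
struct flight_counters {
	unsigned int packets_out;	/* sent and not yet cumulatively ACKed */
	unsigned int sacked_out;	/* reported received via SACK */
	unsigned int lost_out;		/* considered lost */
	unsigned int retrans_out;	/* retransmitted and still outstanding */
};

/* Segments that have "left the network" as far as we can tell. */
static unsigned int tcp_left_out(const struct flight_counters *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* in_flight = packets_out - left_out + retrans_out */
static unsigned int tcp_packets_in_flight(const struct flight_counters *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

int main(void)
{
	struct flight_counters tp = {
		.packets_out = 10, .sacked_out = 3, .lost_out = 1, .retrans_out = 1
	};
	printf("%u\n", tcp_packets_in_flight(&tp));	/* 10 - 4 + 1 = 7 */
	return 0;
}
```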
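The "half-way to cwnd" rule mentioned for tcp_current_ssthresh() amounts to reporting at least 3/4 of cwnd (half-way between cwnd/2 and cwnd) when the connection is not in cwnd reduction. A hedged sketch, again with a simplified stand-in structure rather than struct tcp_sock:

```c
#include <stdio.h>

struct ssthresh_state {
	unsigned int snd_cwnd;
	unsigned int snd_ssthresh;
	int in_cwnd_reduction;	/* stands in for tcp_in_cwnd_reduction(sk) */
};

/* Outside of cwnd reduction, report ssthresh as at least half-way between
 * cwnd/2 and cwnd, i.e. (cwnd >> 1) + (cwnd >> 2) == 3/4 of cwnd. */
static unsigned int tcp_current_ssthresh(const struct ssthresh_state *tp)
{
	unsigned int halfway = (tp->snd_cwnd >> 1) + (tp->snd_cwnd >> 2);

	if (tp->in_cwnd_reduction)
		return tp->snd_ssthresh;
	return tp->snd_ssthresh > halfway ? tp->snd_ssthresh : halfway;
}

int main(void)
{
	struct ssthresh_state tp = { .snd_cwnd = 40, .snd_ssthresh = 10 };
	printf("%u\n", tcp_current_ssthresh(&tp));	/* 30 = 3/4 of cwnd */
	return 0;
}
```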
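The slow-start versus congestion-avoidance distinction described for tcp_is_cwnd_limited() can be sketched as follows; max_packets_out (peak in-flight packets seen for the current window) and is_cwnd_limited are assumed fields that mirror what struct tcp_sock tracks.

```c
#include <stdbool.h>
#include <stdio.h>

struct cwnd_state {
	unsigned int snd_cwnd;
	unsigned int snd_ssthresh;
	unsigned int max_packets_out;	/* peak in-flight seen this window */
	bool is_cwnd_limited;		/* window was fully used recently */
};

static bool tcp_in_slow_start(const struct cwnd_state *tp)
{
	return tp->snd_cwnd < tp->snd_ssthresh;
}

/* Slow start: keep growing cwnd as long as the application has used at
 * least half of it.  Congestion avoidance: only grow it if it was full. */
static bool tcp_is_cwnd_limited(const struct cwnd_state *tp)
{
	if (tcp_in_slow_start(tp))
		return tp->snd_cwnd < 2 * tp->max_packets_out;
	return tp->is_cwnd_limited;
}

int main(void)
{
	struct cwnd_state tp = { .snd_cwnd = 10, .snd_ssthresh = 64,
				 .max_packets_out = 6, .is_cwnd_limited = false };
	printf("%d\n", tcp_is_cwnd_limited(&tp));	/* 1: 10 < 2 * 6 */
	return 0;
}
```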
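For tcp_receive_window(), the remaining advertised window is the advertised right edge (rcv_wup + rcv_wnd) minus what has arrived since (rcv_nxt), clamped at zero because a peer may push beyond the offered window. A sketch with the three relevant receive-side fields pulled into a small stand-in struct:

```c
#include <stdint.h>
#include <stdio.h>

struct rcv_state {
	uint32_t rcv_wup;	/* rcv_nxt at the time of the last window update we sent */
	uint32_t rcv_wnd;	/* window advertised in that update */
	uint32_t rcv_nxt;	/* next sequence number we expect */
};

/* What is left of the window we last advertised; negative means the peer
 * pushed past the offered window, so report zero. */
static uint32_t tcp_receive_window(const struct rcv_state *tp)
{
	int32_t win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	return win < 0 ? 0 : (uint32_t)win;
}

int main(void)
{
	struct rcv_state tp = { .rcv_wup = 1000, .rcv_wnd = 65535, .rcv_nxt = 2000 };
	printf("%u\n", tcp_receive_window(&tp));	/* 64535 */
	return 0;
}
```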
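Finally, the thin-stream test noted for tcp_stream_is_thin() treats a flow as thin when it keeps too few packets in flight for fast retransmit to work well, but only once the initial slow start is over. A hedged sketch; the packets_out threshold of 4 and the "still in initial slow start" check via an effectively infinite ssthresh mirror the usual kernel behaviour but are assumptions here.

```c
#include <stdbool.h>
#include <stdio.h>

#define TCP_INFINITE_SSTHRESH	0x7fffffffU	/* assumed "not yet reduced" marker */

struct thin_state {
	unsigned int packets_out;
	unsigned int snd_ssthresh;
};

/* Thin: fewer than 4 packets in flight (so fast retransmit rarely fires),
 * once the connection has left its initial slow start. */
static bool tcp_stream_is_thin(const struct thin_state *tp)
{
	bool in_initial_slowstart = tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;

	return tp->packets_out < 4 && !in_initial_slowstart;
}

int main(void)
{
	struct thin_state tp = { .packets_out = 2, .snd_ssthresh = 100 };
	printf("%d\n", tcp_stream_is_thin(&tp));	/* 1 */
	return 0;
}
```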