34 #ifndef _RTE_IP_FRAG_H_ 35 #define _RTE_IP_FRAG_H_ 85 TAILQ_ENTRY(ip_frag_pkt) lru;
86 struct ip_frag_key key;
91 struct ip_frag frags[IP_MAX_FRAG_NUM];
94 #define IP_FRAG_DEATH_ROW_LEN 32 97 struct rte_ip_frag_death_row { 125 struct ip_pkt_list lru;
127 __extension__
struct ip_frag_pkt pkt[0];
/*
 * Layout of the 16-bit frag_data field of the IPv6 fragment extension
 * header (RFC 8200): bits 3..15 hold the fragment offset (in 8-byte
 * units), bit 0 holds the More-Fragments flag; bits 1..2 are reserved.
 */
#define	RTE_IPV6_EHDR_MF_SHIFT	0
#define	RTE_IPV6_EHDR_MF_MASK	1
#define	RTE_IPV6_EHDR_FO_SHIFT	3
/* all bits from FO_SHIFT upwards: ~((1 << 3) - 1) == 0xfff8 in 16 bits */
#define	RTE_IPV6_EHDR_FO_MASK	(~((1 << RTE_IPV6_EHDR_FO_SHIFT) - 1))

/* every frag_data bit that carries fragmentation state (MF flag + offset) */
#define RTE_IPV6_FRAG_USED_MASK \
	(RTE_IPV6_EHDR_MF_MASK | RTE_IPV6_EHDR_FO_MASK)

/* extract the More-Fragments flag from a host-order frag_data value */
#define RTE_IPV6_GET_MF(x)	((x) & RTE_IPV6_EHDR_MF_MASK)
/* extract the fragment offset (in 8-byte units) from frag_data */
#define RTE_IPV6_GET_FO(x)	((x) >> RTE_IPV6_EHDR_FO_SHIFT)

/* compose a frag_data value from an offset (already shifted) and MF flag */
#define RTE_IPV6_SET_FRAG_DATA(fo, mf)	\
	(((fo) & RTE_IPV6_EHDR_FO_MASK) | ((mf) & RTE_IPV6_EHDR_MF_MASK))

/*
 * IPv6 fragment extension header (RFC 8200 section 4.5), 8 bytes.
 * NOTE(review): interior fields reconstructed from the standard wire
 * layout -- confirm against the upstream header.
 */
struct ipv6_extension_fragment {
	uint8_t next_header;            /**< Next header type */
	uint8_t reserved;               /**< Reserved field */
	uint16_t frag_data;             /**< All fragmentation data */
	uint32_t id;                    /**< Packet ID */
} __attribute__((__packed__));
213 uint16_t nb_pkts_out,
242 struct ipv6_extension_fragment *frag_hdr);
255 static inline struct ipv6_extension_fragment *
258 if (hdr->
proto == IPPROTO_FRAGMENT) {
259 return (
struct ipv6_extension_fragment *) ++hdr;
290 uint16_t nb_pkts_out, uint16_t mtu_size,
327 uint16_t flag_offset, ip_flag, ip_ofs;
330 ip_ofs = (uint16_t)(flag_offset & IPV4_HDR_OFFSET_MASK);
331 ip_flag = (uint16_t)(flag_offset & IPV4_HDR_MF_FLAG);
333 return ip_flag != 0 || ip_ofs != 0;
static void rte_ip_frag_table_destroy(struct rte_ip_frag_tbl *tbl)
struct rte_mbuf * rte_ipv4_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms, struct ipv4_hdr *ip_hdr)
static struct ipv6_extension_fragment * rte_ipv6_frag_get_ipv6_fragment_header(struct ipv6_hdr *hdr)
static uint16_t rte_be_to_cpu_16(uint16_t x)
void rte_ip_frag_free_death_row(struct rte_ip_frag_death_row *dr, uint32_t prefetch)
int32_t rte_ipv4_fragment_packet(struct rte_mbuf *pkt_in, struct rte_mbuf **pkts_out, uint16_t nb_pkts_out, uint16_t mtu_size, struct rte_mempool *pool_direct, struct rte_mempool *pool_indirect)
struct rte_ip_frag_tbl * rte_ip_frag_table_create(uint32_t bucket_num, uint32_t bucket_entries, uint32_t max_entries, uint64_t max_cycles, int socket_id)
TAILQ_HEAD(rte_driver_list, rte_driver)
#define IP_FRAG_DEATH_ROW_LEN
#define __rte_cache_aligned
struct ip_frag_pkt * last
int32_t rte_ipv6_fragment_packet(struct rte_mbuf *pkt_in, struct rte_mbuf **pkts_out, uint16_t nb_pkts_out, uint16_t mtu_size, struct rte_mempool *pool_direct, struct rte_mempool *pool_indirect)
static int rte_ipv4_frag_pkt_is_fragmented(const struct ipv4_hdr *hdr)
void rte_ip_frag_table_statistics_dump(FILE *f, const struct rte_ip_frag_tbl *tbl)
struct rte_mbuf * rte_ipv6_frag_reassemble_packet(struct rte_ip_frag_tbl *tbl, struct rte_ip_frag_death_row *dr, struct rte_mbuf *mb, uint64_t tms, struct ipv6_hdr *ip_hdr, struct ipv6_extension_fragment *frag_hdr)