#ifndef _RTE_CUCKOO_HASH_H_
#define _RTE_CUCKOO_HASH_H_

#if defined(RTE_ARCH_X86)
#include "rte_cmp_x86.h"
#endif

#if defined(RTE_ARCH_ARM64)
#include "rte_cmp_arm64.h"
#endif

/* Macro to enable/disable run-time checking of function parameters */
#if defined(RTE_LIBRTE_HASH_DEBUG)
#define RETURN_IF_TRUE(cond, retval) do { \
	if (cond) \
		return retval; \
} while (0)
#else
#define RETURN_IF_TRUE(cond, retval)
#endif

/* Hash function used if none is specified */
#if defined(RTE_MACHINE_CPUFLAG_SSE4_2) || defined(RTE_MACHINE_CPUFLAG_CRC32)
#include <rte_hash_crc.h>
#define DEFAULT_HASH_FUNC	rte_hash_crc
#else
#include <rte_jhash.h>
#define DEFAULT_HASH_FUNC	rte_jhash
#endif

#if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
/* Options to select a key compare function, based on key size and
 * whether a custom function is installed */
enum cmp_jump_table_case {
	KEY_CUSTOM = 0,
	KEY_16_BYTES, KEY_32_BYTES, KEY_48_BYTES, KEY_64_BYTES,
	KEY_80_BYTES, KEY_96_BYTES, KEY_112_BYTES, KEY_128_BYTES,
	KEY_OTHER_BYTES,
	NUM_KEY_CMP_CASES,
};

/* Table storing the key compare function for each case */
const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
	NULL, /* KEY_CUSTOM: set through rte_hash_set_cmp_func() */
	rte_hash_k16_cmp_eq, rte_hash_k32_cmp_eq,
	rte_hash_k48_cmp_eq, rte_hash_k64_cmp_eq,
	rte_hash_k80_cmp_eq, rte_hash_k96_cmp_eq,
	rte_hash_k112_cmp_eq, rte_hash_k128_cmp_eq,
	memcmp /* KEY_OTHER_BYTES */
};
#else
/* Without SSE/NEON comparators, only the custom function and memcmp
 * are available */
enum cmp_jump_table_case {
	KEY_CUSTOM = 0,
	KEY_OTHER_BYTES,
	NUM_KEY_CMP_CASES,
};

const rte_hash_cmp_eq_t cmp_jump_table[NUM_KEY_CMP_CASES] = {
	NULL,
	memcmp
};
#endif
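/*
 * Illustrative sketch, not part of the installed header: roughly how the
 * library dispatches a key comparison through the jump table. The wrapper
 * name and its placement here are assumptions for this example; the real
 * dispatch code lives in rte_cuckoo_hash.c and uses the struct rte_hash
 * fields defined further below.
 */
static inline int
example_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
{
	if (h->cmp_jump_table_idx == KEY_CUSTOM)
		/* user comparator installed via rte_hash_set_cmp_func() */
		return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
	/* size-specialized comparator, or memcmp for other key sizes */
	return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
}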
/* Enum used to select the implementation of the key add operation */
enum add_key_case {
	ADD_KEY_SINGLEWRITER = 0, /* single writer thread */
	ADD_KEY_MULTIWRITER,      /* multiple writers, spinlock protected */
	ADD_KEY_MULTIWRITER_TM,   /* multiple writers, transactional memory */
};
/** Number of items per bucket. */
#define RTE_HASH_BUCKET_ENTRIES		4

#define NULL_SIGNATURE			0

#define KEY_ALIGNMENT			16

#define LCORE_CACHE_SIZE		64

/* Maximum number of entry displacements before an insertion is aborted */
#define RTE_HASH_MAX_PUSHES		100

#define RTE_HASH_BFS_QUEUE_MAX_LEN	1000

/* Transaction abort code used when a concurrent writer invalidates
 * an in-progress cuckoo displacement path */
#define RTE_XABORT_CUCKOO_PATH_INVALIDED 0x4

#define RTE_HASH_TSX_MAX_RETRY		10

/* Per-lcore cache of free key-table slot indexes */
struct lcore_cache {
	unsigned len;                 /**< Cache length */
	void *objs[LCORE_CACHE_SIZE]; /**< Cached objects */
} __rte_cache_aligned;
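/*
 * Illustrative sketch, assuming the caching scheme described above: a
 * writer takes a free key-table slot from its per-lcore cache first and
 * only touches the shared ring on a miss. The function name and the
 * single-dequeue fallback are illustrative, not the exact
 * rte_cuckoo_hash.c code (which refills the cache in bulk).
 */
static inline void *
example_get_free_slot(struct rte_ring *free_slots, struct lcore_cache *cached)
{
	void *slot_id;

	if (cached->len > 0)
		return cached->objs[--cached->len]; /* hit: no ring access */

	/* miss: fall back to the multi-consumer ring shared by all lcores */
	if (rte_ring_mc_dequeue(free_slots, &slot_id) != 0)
		return NULL; /* no free slots left: table is full */
	return slot_id;
}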
/* Structure storing both primary and secondary hashes */
struct rte_hash_signatures {
	union {
		struct {
			hash_sig_t current; /* primary signature */
			hash_sig_t alt;     /* secondary signature */
		};
		uint64_t sig;               /* both signatures as one word */
	};
};
/* Structure that stores key-value pair */
struct rte_hash_key {
	union {
		uintptr_t idata; /* value stored inline */
		void *pdata;     /* pointer to user data */
	};
	/* Variable key size */
	char key[0];
} __attribute__((aligned(KEY_ALIGNMENT)));
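/*
 * Illustrative sketch: how a key entry is addressed inside the flat
 * key_store array kept in struct rte_hash (defined below), where
 * key_entry_size covers sizeof(struct rte_hash_key) plus the configured
 * key_len. The helper name is an assumption for this example.
 */
static inline struct rte_hash_key *
example_key_slot(const struct rte_hash *h, uint32_t key_idx)
{
	return (struct rte_hash_key *)((char *)h->key_store +
				       key_idx * h->key_entry_size);
}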
/** Bucket structure */
struct rte_hash_bucket {
	struct rte_hash_signatures signatures[RTE_HASH_BUCKET_ENTRIES];
	/* Includes dummy key index that always contains index 0 */
	uint32_t key_idx[RTE_HASH_BUCKET_ENTRIES + 1];
	uint8_t flag[RTE_HASH_BUCKET_ENTRIES];
} __rte_cache_aligned;
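/*
 * Illustrative sketch: a signature maps to its primary bucket by masking
 * with bucket_bitmask, which works because the bucket count is a power of
 * two. A secondary signature derived from the primary one (see
 * rte_hash_secondary_hash() in the library source) selects the alternative
 * bucket used during cuckoo displacement.
 */
static inline uint32_t
example_prim_bucket_idx(const struct rte_hash *h, hash_sig_t sig)
{
	return sig & h->bucket_bitmask;
}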
/** A hash table structure. */
struct rte_hash {
	char name[RTE_HASH_NAMESIZE];   /**< Name of the hash. */
	uint32_t entries;               /**< Total table entries. */
	uint32_t num_buckets;           /**< Number of buckets in table. */
	uint32_t key_len;               /**< Length of hash key. */
	rte_hash_function hash_func;    /**< Function used to calculate hash. */
	uint32_t hash_func_init_val;    /**< Init value used by hash_func. */
	rte_hash_cmp_eq_t rte_hash_custom_cmp_eq;
	/**< Custom function used to compare keys. */
	enum cmp_jump_table_case cmp_jump_table_idx;
	/**< Indicates which compare function to use. */
	uint32_t bucket_bitmask;        /**< Bitmask for getting bucket index
						from hash signature. */
	uint32_t key_entry_size;        /**< Size of each key entry. */
	struct rte_ring *free_slots;    /**< Ring that stores all indexes
						of the free slots in the key table */
	void *key_store;                /**< Table storing all keys and data */
	struct rte_hash_bucket *buckets;
	/**< Table with buckets storing all the hash values and key indexes
	     to the key table */
	uint8_t hw_trans_mem_support;   /**< Hardware transactional memory support */
	struct lcore_cache *local_free_slots;
	/**< Local cache per lcore, storing some indexes of the free slots */
	enum add_key_case add_key;      /**< Multi-writer hash add behavior */
	rte_spinlock_t *multiwriter_lock; /**< Multiwriter lock for w/o TM */
} __rte_cache_aligned;
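/*
 * Usage sketch against the public API declared in rte_hash.h; the struct
 * above is internal, and applications only ever hold the opaque
 * struct rte_hash pointer. The parameter values here are arbitrary
 * examples.
 */
#include <rte_hash.h>

static int
example_create_and_use(void)
{
	struct rte_hash_parameters params = {
		.name = "example_hash",
		.entries = 1024,
		.key_len = sizeof(uint32_t),
		.hash_func = DEFAULT_HASH_FUNC,
		.hash_func_init_val = 0,
		.socket_id = 0,
	};
	struct rte_hash *h = rte_hash_create(&params);
	uint32_t key = 42;

	if (h == NULL)
		return -1;
	/* both calls return the key's slot index (>= 0) or a negative error */
	if (rte_hash_add_key(h, &key) < 0)
		return -1;
	return rte_hash_lookup(h, &key);
}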
/* Structure used by the BFS search for a cuckoo displacement path */
struct queue_node {
	struct rte_hash_bucket *bkt; /* Current bucket on the bfs search */

	struct queue_node *prev;     /* Parent(bucket) in search path */
	int prev_slot;               /* Parent(slot) in search path */
};

#endif
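/*
 * Illustrative sketch, not the rte_cuckoo_hash.c implementation: how
 * queue_node forms a breadth-first search tree over buckets until an empty
 * slot is found, bounded by RTE_HASH_BFS_QUEUE_MAX_LEN. get_alt_bucket()
 * is a hypothetical helper standing in for the alternative-bucket
 * computation from the stored signatures.
 */
static struct queue_node *
example_cuckoo_bfs(const struct rte_hash *h, struct rte_hash_bucket *start,
		   struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN])
{
	unsigned head = 0, tail = 1;

	queue[0] = (struct queue_node){ .bkt = start, .prev = NULL,
					.prev_slot = -1 };
	while (head < tail) {
		struct queue_node *node = &queue[head++];
		unsigned i;

		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			if (node->bkt->signatures[i].sig == NULL_SIGNATURE)
				/* empty slot: caller replays the path via prev */
				return node;
			if (tail == RTE_HASH_BFS_QUEUE_MAX_LEN)
				return NULL; /* queue limit reached, give up */
			queue[tail++] = (struct queue_node){
				.bkt = get_alt_bucket(h, node->bkt, i),
				.prev = node,
				.prev_slot = (int)i,
			};
		}
	}
	return NULL;
}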
Definitions referenced above, declared in rte_hash.h and the EAL headers:

    typedef uint32_t (*rte_hash_function)(const void *key, uint32_t key_len,
                                          uint32_t init_val);
    typedef int (*rte_hash_cmp_eq_t)(const void *key1, const void *key2,
                                     size_t key_len);
    #define RTE_HASH_NAMESIZE       /* maximum length of the table name */
    #define __rte_cache_aligned     /* EAL cache-line alignment attribute */

Data fields of struct rte_hash (documented above):

    rte_hash_function hash_func
    uint32_t hash_func_init_val
    rte_hash_cmp_eq_t rte_hash_custom_cmp_eq
    struct rte_ring *free_slots
    struct lcore_cache *local_free_slots
    rte_spinlock_t *multiwriter_lock
    uint8_t hw_trans_mem_support
    struct rte_hash_bucket *buckets
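/*
 * Usage sketch for the custom-comparator hook: rte_hash_set_cmp_func()
 * (public API in rte_hash.h) installs the callback, which the library then
 * reaches through the KEY_CUSTOM jump-table entry. The comparator body
 * here is just an example.
 */
#include <string.h>

static int
example_cmp(const void *key1, const void *key2, size_t key_len)
{
	/* must behave like memcmp: return 0 when the keys match */
	return memcmp(key1, key2, key_len);
}

/* after rte_hash_create():
 *	rte_hash_set_cmp_func(h, example_cmp);
 */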