DPDK 23.11.2
rte_eventdev.h
1/* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016 Cavium, Inc.
3 * Copyright(c) 2016-2018 Intel Corporation.
4 * Copyright 2016 NXP
5 * All rights reserved.
6 */
7
8#ifndef _RTE_EVENTDEV_H_
9#define _RTE_EVENTDEV_H_
10
208#ifdef __cplusplus
209extern "C" {
210#endif
211
212#include <rte_compat.h>
213#include <rte_common.h>
214#include <rte_errno.h>
215#include <rte_mbuf_pool_ops.h>
216#include <rte_mempool.h>
217
219
220struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
221struct rte_event;
222
223/* Event device capability bitmap flags */
224#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)
236#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)
243#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)
252#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)
259#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)
267#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)
278#define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)
288#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)
294#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)
300#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)
306#define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)
316#define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)
323#define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)
329/* Event device priority levels */
330#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0
335#define RTE_EVENT_DEV_PRIORITY_NORMAL 128
340#define RTE_EVENT_DEV_PRIORITY_LOWEST 255
346/* Event queue scheduling weights */
347#define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255
351#define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0
356/* Event queue scheduling affinity */
357#define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255
361#define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0
373uint8_t
374rte_event_dev_count(void);
375
386int
387rte_event_dev_get_dev_id(const char *name);
388
399int
400rte_event_dev_socket_id(uint8_t dev_id);
401
460
475int
476rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);
477
481#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0
485#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1
489#define RTE_EVENT_DEV_ATTR_STARTED 2
490
503int
504rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
505 uint32_t *attr_value);
506
507
508/* Event device configuration bitmap flags */
509#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)
587
607int
608rte_event_dev_configure(uint8_t dev_id,
609		       const struct rte_event_dev_config *dev_conf);
610
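/*
 * Illustrative usage sketch, not part of this header: a typical configure
 * sequence that sizes the device from the rte_event_dev_info_get() limits.
 * The queue/port counts chosen here are example values only.
 */
static int
example_eventdev_configure(uint8_t dev_id)
{
	struct rte_event_dev_info info;
	struct rte_event_dev_config config = {0};
	int ret;

	ret = rte_event_dev_info_get(dev_id, &info);
	if (ret < 0)
		return ret;

	config.nb_event_queues = 2;	/* example value */
	config.nb_event_ports = 4;	/* example value */
	config.nb_events_limit = info.max_num_events;
	config.nb_event_queue_flows = info.max_event_queue_flows;
	config.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
	config.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
	config.dequeue_timeout_ns = info.min_dequeue_timeout_ns;

	return rte_event_dev_configure(dev_id, &config);
}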
611/* Event queue specific APIs */
612
613/* Event queue configuration bitmap flags */
614#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)
620#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)
682
704int
705rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
706 struct rte_event_queue_conf *queue_conf);
707
726int
727rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
728 const struct rte_event_queue_conf *queue_conf);
729
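/*
 * Illustrative usage sketch, not part of this header: configure one queue for
 * atomic scheduling, starting from the driver's default configuration.
 */
static int
example_atomic_queue_setup(uint8_t dev_id, uint8_t queue_id)
{
	struct rte_event_queue_conf conf;
	int ret;

	ret = rte_event_queue_default_conf_get(dev_id, queue_id, &conf);
	if (ret < 0)
		return ret;

	conf.schedule_type = RTE_SCHED_TYPE_ATOMIC;
	conf.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	return rte_event_queue_setup(dev_id, queue_id, &conf);
}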
733#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0
737#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1
741#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2
745#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3
749#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4
753#define RTE_EVENT_QUEUE_ATTR_WEIGHT 5
757#define RTE_EVENT_QUEUE_ATTR_AFFINITY 6
758
779int
780rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
781 uint32_t *attr_value);
782
801int
802rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
803 uint64_t attr_value);
804
805/* Event port specific APIs */
806
807/* Event port configuration bitmap flags */
808#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0)
815#define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1)
820#define RTE_EVENT_PORT_CFG_HINT_PRODUCER (1ULL << 2)
830#define RTE_EVENT_PORT_CFG_HINT_CONSUMER (1ULL << 3)
841#define RTE_EVENT_PORT_CFG_HINT_WORKER (1ULL << 4)
882
904int
905rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
906 struct rte_event_port_conf *port_conf);
907
928int
929rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
930 const struct rte_event_port_conf *port_conf);
931
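/*
 * Illustrative usage sketch, not part of this header: set up a worker port
 * from the default configuration, with the worker hint flag added.
 */
static int
example_worker_port_setup(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event_port_conf conf;
	int ret;

	ret = rte_event_port_default_conf_get(dev_id, port_id, &conf);
	if (ret < 0)
		return ret;

	conf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;

	return rte_event_port_setup(dev_id, port_id, &conf);
}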
932typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
933 struct rte_event event, void *arg);
963void
964rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
965 rte_eventdev_port_flush_t release_cb, void *args);
966
970#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0
974#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1
978#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2
982#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3
983
1000int
1001rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
1002 uint32_t *attr_value);
1003
1020int
1021rte_event_dev_start(uint8_t dev_id);
1022
1041void
1042rte_event_dev_stop(uint8_t dev_id);
1043
1044typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
1045 struct rte_event event, void *arg);
1073int
1074rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
1075		rte_eventdev_stop_flush_t callback, void *userdata);
1076
1088int
1089rte_event_dev_close(uint8_t dev_id);
1090
1094struct rte_event_vector {
1095	uint16_t nb_elem;
1097 uint16_t elem_offset : 12;
1099 uint16_t rsvd : 3;
1101 uint16_t attr_valid : 1;
1104 union {
1105 /* Used by Rx/Tx adapter.
1106 * Indicates that all the elements in this vector belong to the
1107 * same port and queue pair when originating from Rx adapter,
1108 * valid only when event type is ETHDEV_VECTOR or
1109 * ETH_RX_ADAPTER_VECTOR.
1110 * Can also be used to indicate the Tx adapter the destination
1111 * port and queue of the mbufs in the vector
1112 */
1113 struct {
1114 uint16_t port;
1115 uint16_t queue;
1116 };
1117 };
1119 uint64_t impl_opaque;
1120
1121/* empty structures do not have zero size in C++ leading to compilation errors
1122 * with clang about structure having different sizes in C and C++.
1123 * Since these are all zero-sized arrays, we can omit the "union" wrapper for
1124 * C++ builds, removing the warning.
1125 */
1126#ifndef __cplusplus
1132 union {
1133#endif
1134 struct rte_mbuf *mbufs[0];
1135 void *ptrs[0];
1136 uint64_t u64s[0];
1137#ifndef __cplusplus
1138 } __rte_aligned(16);
1139#endif
1144#ifndef __DOXYGEN__
1145} __rte_aligned(16);
1146#else
1147};
1148#endif
1149
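/*
 * Illustrative usage sketch, not part of this header: iterate over the mbufs
 * carried by an event vector (only meaningful for *_VECTOR event types);
 * elem_offset is assumed to mark the first valid element in the array.
 */
static inline void
example_event_vector_walk(struct rte_event_vector *vec)
{
	uint16_t i;

	for (i = 0; i < vec->nb_elem; i++) {
		struct rte_mbuf *m = vec->mbufs[vec->elem_offset + i];
		(void)m; /* ... process each mbuf ... */
	}
}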
1150/* Scheduler type definitions */
1151#define RTE_SCHED_TYPE_ORDERED 0
1178#define RTE_SCHED_TYPE_ATOMIC 1
1197#define RTE_SCHED_TYPE_PARALLEL 2
1210/* Event types to classify the event source */
1211#define RTE_EVENT_TYPE_ETHDEV 0x0
1213#define RTE_EVENT_TYPE_CRYPTODEV 0x1
1215#define RTE_EVENT_TYPE_TIMER 0x2
1217#define RTE_EVENT_TYPE_CPU 0x3
1221#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4
1223#define RTE_EVENT_TYPE_DMADEV 0x5
1225#define RTE_EVENT_TYPE_VECTOR 0x8
1237#define RTE_EVENT_TYPE_ETHDEV_VECTOR \
1238 (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)
1240#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)
1242#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
1243 (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)
1245#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR \
1246 (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)
1249#define RTE_EVENT_TYPE_MAX 0x10
1252/* Event enqueue operations */
1253#define RTE_EVENT_OP_NEW 0
1257#define RTE_EVENT_OP_FORWARD 1
1265#define RTE_EVENT_OP_RELEASE 2
1302struct rte_event {
1303	union {
1304 uint64_t event;
1306 struct {
1307 uint32_t flow_id:20;
1314 uint32_t sub_event_type:8;
1318 uint32_t event_type:4;
1322 uint8_t op:2;
1328 uint8_t rsvd:4;
1330 uint8_t sched_type:2;
1335 uint8_t queue_id;
1342 uint8_t priority;
1359 };
1360 };
1362 union {
1363 uint64_t u64;
1371 };
1372};
1373
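/*
 * Illustrative usage sketch, not part of this header: populate an event that
 * injects a caller-supplied mbuf into queue 0 as a NEW atomic event.
 */
static inline void
example_event_fill(struct rte_event *ev, struct rte_mbuf *m)
{
	ev->event = 0; /* clear the metadata word before setting bit-fields */
	ev->op = RTE_EVENT_OP_NEW;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev->event_type = RTE_EVENT_TYPE_CPU;
	ev->queue_id = 0;
	ev->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev->flow_id = 0;
	ev->mbuf = m;
}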
1374/* Ethdev Rx adapter capability bitmap flags */
1375#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1
1379#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2
1383#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4
1390#define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8
1411int
1412rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1413 uint32_t *caps);
1414
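/*
 * Illustrative usage sketch, not part of this header: check whether the Rx
 * adapter path between this event device and ethdev port 0 uses an internal
 * event port (ethdev port 0 is an example value).
 */
static inline int
example_rx_adapter_internal_port(uint8_t dev_id)
{
	uint32_t caps = 0;

	if (rte_event_eth_rx_adapter_caps_get(dev_id, 0, &caps) < 0)
		return 0;
	return (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) != 0;
}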
1415#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)
1418#define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC (1ULL << 1)
1434int
1435rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);
1436
1437/* Crypto adapter capability bitmap flag */
1438#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1445#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1452#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4
1457#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8
1462#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR 0x10
1486int
1487rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
1488 uint32_t *caps);
1489
1490/* DMA adapter capability bitmap flag */
1491#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1
1498#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2
1505#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4
1528__rte_experimental
1529int
1530rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);
1531
1532/* Ethdev Tx adapter capability bitmap flags */
1533#define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1
1536#define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2
1557int
1558rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
1559 uint32_t *caps);
1560
1585int
1586rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
1587 uint64_t *timeout_ticks);
1588
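/*
 * Illustrative usage sketch, not part of this header: convert a 100 us
 * dequeue timeout into device ticks, falling back to 0 (non-blocking dequeue)
 * if the conversion is not supported.
 */
static inline uint64_t
example_dequeue_timeout(uint8_t dev_id)
{
	uint64_t ticks = 0;

	if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
		ticks = 0;
	return ticks;
}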
1652int
1653rte_event_port_link(uint8_t dev_id, uint8_t port_id,
1654 const uint8_t queues[], const uint8_t priorities[],
1655 uint16_t nb_links);
1656
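/*
 * Illustrative usage sketch, not part of this header: link a port to all
 * configured queues at normal service priority (NULL/0 selects every queue).
 */
static inline int
example_link_all_queues(uint8_t dev_id, uint8_t port_id)
{
	int nb_links = rte_event_port_link(dev_id, port_id, NULL, NULL, 0);

	return nb_links < 0 ? nb_links : 0;
}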
1700int
1701rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
1702 uint8_t queues[], uint16_t nb_unlinks);
1703
1776__rte_experimental
1777int
1778rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
1779 const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);
1780
1829__rte_experimental
1830int
1831rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1832 uint16_t nb_unlinks, uint8_t profile_id);
1833
1855int
1856rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);
1857
1884int
1885rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
1886 uint8_t queues[], uint8_t priorities[]);
1887
1919__rte_experimental
1920int
1921rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
1922 uint8_t priorities[], uint8_t profile_id);
1923
1939int
1940rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);
1941
1955int
1956rte_event_dev_dump(uint8_t dev_id, FILE *f);
1957
1959#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64
1960
1964enum rte_event_dev_xstats_mode {
1965	RTE_EVENT_DEV_XSTATS_DEVICE,
1966 RTE_EVENT_DEV_XSTATS_PORT,
1967 RTE_EVENT_DEV_XSTATS_QUEUE,
1968};
1969
1979
2012int
2013rte_event_dev_xstats_names_get(uint8_t dev_id,
2014			       enum rte_event_dev_xstats_mode mode,
2015 uint8_t queue_port_id,
2016 struct rte_event_dev_xstats_name *xstats_names,
2017 uint64_t *ids,
2018 unsigned int size);
2019
2046int
2047rte_event_dev_xstats_get(uint8_t dev_id,
2048			 enum rte_event_dev_xstats_mode mode,
2049 uint8_t queue_port_id,
2050 const uint64_t ids[],
2051 uint64_t values[], unsigned int n);
2052
2069uint64_t
2070rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
2071 uint64_t *id);
2072
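/*
 * Illustrative usage sketch, not part of this header: read a single extended
 * statistic by name. The name used here ("dev_sched_calls") is purely an
 * example; real names are driver specific and can be enumerated with
 * rte_event_dev_xstats_names_get().
 */
static inline uint64_t
example_read_xstat(uint8_t dev_id)
{
	uint64_t id;

	return rte_event_dev_xstats_by_name_get(dev_id, "dev_sched_calls", &id);
}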
2093int
2094rte_event_dev_xstats_reset(uint8_t dev_id,
2095			   enum rte_event_dev_xstats_mode mode,
2096 int16_t queue_port_id,
2097 const uint64_t ids[],
2098 uint32_t nb_ids);
2099
2110int rte_event_dev_selftest(uint8_t dev_id);
2111
2142struct rte_mempool *
2143rte_event_vector_pool_create(const char *name, unsigned int n,
2144 unsigned int cache_size, uint16_t nb_elem,
2145 int socket_id);
2146
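/*
 * Illustrative usage sketch, not part of this header: create a mempool of
 * event vectors, each able to carry up to 64 pointers (the pool name and all
 * sizes are example values).
 */
static inline struct rte_mempool *
example_vector_pool(int socket_id)
{
	return rte_event_vector_pool_create("example_ev_vec_pool", 16 * 1024,
					    128, 64, socket_id);
}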
2147#include <rte_eventdev_core.h>
2148
2149static __rte_always_inline uint16_t
2150__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2151 const struct rte_event ev[], uint16_t nb_events,
2152 const event_enqueue_burst_t fn)
2153{
2154 const struct rte_event_fp_ops *fp_ops;
2155 void *port;
2156
2157 fp_ops = &rte_event_fp_ops[dev_id];
2158 port = fp_ops->data[port_id];
2159#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2160 if (dev_id >= RTE_EVENT_MAX_DEVS ||
2161 port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2162 rte_errno = EINVAL;
2163 return 0;
2164 }
2165
2166 if (port == NULL) {
2167 rte_errno = EINVAL;
2168 return 0;
2169 }
2170#endif
2171 rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);
2172 /*
2173 * Allow zero cost non burst mode routine invocation if application
2174 * requests nb_events as const one
2175 */
2176 if (nb_events == 1)
2177 return (fp_ops->enqueue)(port, ev);
2178 else
2179 return fn(port, ev, nb_events);
2180}
2181
2225static inline uint16_t
2226rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
2227 const struct rte_event ev[], uint16_t nb_events)
2228{
2229 const struct rte_event_fp_ops *fp_ops;
2230
2231 fp_ops = &rte_event_fp_ops[dev_id];
2232 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2233 fp_ops->enqueue_burst);
2234}
2235
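/*
 * Illustrative usage sketch, not part of this header: enqueue a full burst,
 * retrying the events the device could not accept; a real application would
 * usually bound the retries or drop on persistent back-pressure.
 */
static inline void
example_enqueue_all(uint8_t dev_id, uint8_t port_id,
		    const struct rte_event ev[], uint16_t nb_events)
{
	uint16_t sent = 0;

	while (sent < nb_events)
		sent += rte_event_enqueue_burst(dev_id, port_id, &ev[sent],
						nb_events - sent);
}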
2277static inline uint16_t
2278rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
2279 const struct rte_event ev[], uint16_t nb_events)
2280{
2281 const struct rte_event_fp_ops *fp_ops;
2282
2283 fp_ops = &rte_event_fp_ops[dev_id];
2284 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2285 fp_ops->enqueue_new_burst);
2286}
2287
2329static inline uint16_t
2330rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
2331 const struct rte_event ev[], uint16_t nb_events)
2332{
2333 const struct rte_event_fp_ops *fp_ops;
2334
2335 fp_ops = &rte_event_fp_ops[dev_id];
2336 return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
2337 fp_ops->enqueue_forward_burst);
2338}
2339
2406static inline uint16_t
2407rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
2408 uint16_t nb_events, uint64_t timeout_ticks)
2409{
2410 const struct rte_event_fp_ops *fp_ops;
2411 void *port;
2412
2413 fp_ops = &rte_event_fp_ops[dev_id];
2414 port = fp_ops->data[port_id];
2415#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2416 if (dev_id >= RTE_EVENT_MAX_DEVS ||
2417 port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
2418 rte_errno = EINVAL;
2419 return 0;
2420 }
2421
2422 if (port == NULL) {
2423 rte_errno = EINVAL;
2424 return 0;
2425 }
2426#endif
2427 rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);
2428 /*
2429 * Allow zero cost non burst mode routine invocation if application
2430 * requests nb_events as const one
2431 */
2432 if (nb_events == 1)
2433 return (fp_ops->dequeue)(port, ev, timeout_ticks);
2434 else
2435 return (fp_ops->dequeue_burst)(port, ev, nb_events,
2436 timeout_ticks);
2437}
2438
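/*
 * Illustrative usage sketch, not part of this header: a minimal worker loop
 * that forwards every dequeued event to "next_queue" (a caller-chosen queue).
 * Partial enqueues are ignored here for brevity.
 */
static inline void
example_worker_loop(uint8_t dev_id, uint8_t port_id, uint8_t next_queue)
{
	struct rte_event ev[32];
	uint16_t i, nb;

	for (;;) {
		nb = rte_event_dequeue_burst(dev_id, port_id, ev,
					     RTE_DIM(ev), 0);
		for (i = 0; i < nb; i++) {
			/* ... process ev[i] ... */
			ev[i].op = RTE_EVENT_OP_FORWARD;
			ev[i].queue_id = next_queue;
		}
		if (nb > 0)
			rte_event_enqueue_burst(dev_id, port_id, ev, nb);
	}
}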
2439#define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0)
2481static inline int
2482rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
2483{
2484 const struct rte_event_fp_ops *fp_ops;
2485 void *port;
2486
2487 fp_ops = &rte_event_fp_ops[dev_id];
2488 port = fp_ops->data[port_id];
2489#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2490 if (dev_id >= RTE_EVENT_MAX_DEVS ||
2491 port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2492 return -EINVAL;
2493
2494 if (port == NULL)
2495 return -EINVAL;
2496
2497 if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
2498 return -EINVAL;
2499#endif
2500 rte_eventdev_trace_maintain(dev_id, port_id, op);
2501
2502 if (fp_ops->maintain != NULL)
2503 fp_ops->maintain(port, op);
2504
2505 return 0;
2506}
2507
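/*
 * Illustrative usage sketch, not part of this header: on devices that do not
 * advertise RTE_EVENT_DEV_CAP_MAINTENANCE_FREE, call rte_event_maintain()
 * from the worker whenever it goes idle (dev_caps is assumed to hold the
 * event_dev_cap value reported by rte_event_dev_info_get()).
 */
static inline void
example_idle_maintain(uint8_t dev_id, uint8_t port_id, uint32_t dev_caps)
{
	if (!(dev_caps & RTE_EVENT_DEV_CAP_MAINTENANCE_FREE))
		rte_event_maintain(dev_id, port_id, 0);
}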
2529static inline uint8_t
2530rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
2531{
2532 const struct rte_event_fp_ops *fp_ops;
2533 void *port;
2534
2535 fp_ops = &rte_event_fp_ops[dev_id];
2536 port = fp_ops->data[port_id];
2537
2538#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
2539 if (dev_id >= RTE_EVENT_MAX_DEVS ||
2540 port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
2541 return -EINVAL;
2542
2543 if (port == NULL)
2544 return -EINVAL;
2545
2546 if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
2547 return -EINVAL;
2548#endif
2549 rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);
2550
2551 return fp_ops->profile_switch(port, profile_id);
2552}
2553
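/*
 * Illustrative usage sketch, not part of this header: populate link profile 1
 * with a single queue, then switch the port to that profile from the
 * datapath (the queue and profile numbers are example values).
 */
static inline int
example_use_link_profile(uint8_t dev_id, uint8_t port_id, uint8_t queue_id)
{
	uint8_t prio = RTE_EVENT_DEV_PRIORITY_NORMAL;
	int ret;

	ret = rte_event_port_profile_links_set(dev_id, port_id, &queue_id,
					       &prio, 1, 1);
	if (ret != 1)
		return -1;

	return rte_event_port_profile_switch(dev_id, port_id, 1);
}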
2554#ifdef __cplusplus
2555}
2556#endif
2557
2558#endif /* _RTE_EVENTDEV_H_ */
#define __rte_aligned(a)
Definition rte_common.h:70
#define __rte_always_inline
Definition rte_common.h:331
#define rte_errno
Definition rte_errno.h:29
int rte_event_port_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[], uint8_t priorities[])
int rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id)
int rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id, uint64_t attr_value)
void(* rte_eventdev_port_flush_t)(uint8_t dev_id, struct rte_event event, void *arg)
int rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns, uint64_t *timeout_ticks)
int rte_event_port_link(uint8_t dev_id, uint8_t port_id, const uint8_t queues[], const uint8_t priorities[], uint16_t nb_links)
static uint16_t rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id, const struct rte_event ev[], uint16_t nb_events)
static uint16_t rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[], uint16_t nb_events, uint64_t timeout_ticks)
int rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id)
static uint8_t rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
rte_event_dev_xstats_mode
__rte_experimental int rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[], const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id)
int rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, uint32_t *caps)
int rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id, const struct rte_event_queue_conf *queue_conf)
int rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id, struct rte_event_queue_conf *queue_conf)
int rte_event_dev_selftest(uint8_t dev_id)
int rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info)
int rte_event_dev_stop_flush_callback_register(uint8_t dev_id, rte_eventdev_stop_flush_t callback, void *userdata)
void rte_event_dev_stop(uint8_t dev_id)
uint8_t rte_event_dev_count(void)
struct rte_mempool * rte_event_vector_pool_create(const char *name, unsigned int n, unsigned int cache_size, uint16_t nb_elem, int socket_id)
int rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id, uint32_t *caps)
void rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id, rte_eventdev_port_flush_t release_cb, void *args)
int rte_event_dev_xstats_reset(uint8_t dev_id, enum rte_event_dev_xstats_mode mode, int16_t queue_port_id, const uint64_t ids[], uint32_t nb_ids)
int rte_event_dev_dump(uint8_t dev_id, FILE *f)
int rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps)
int rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id, uint32_t *attr_value)
int rte_event_dev_get_dev_id(const char *name)
uint64_t rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name, uint64_t *id)
int rte_event_dev_xstats_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id, const uint64_t ids[], uint64_t values[], unsigned int n)
#define RTE_EVENT_DEV_MAINT_OP_FLUSH
static uint16_t rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id, const struct rte_event ev[], uint16_t nb_events)
void(* rte_eventdev_stop_flush_t)(uint8_t dev_id, struct rte_event event, void *arg)
int rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id, uint32_t *attr_value)
__rte_experimental int rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[], uint8_t priorities[], uint8_t profile_id)
static int rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
int rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id, uint32_t *attr_value)
int rte_event_dev_start(uint8_t dev_id)
int rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id, struct rte_event_port_conf *port_conf)
int rte_event_dev_xstats_names_get(uint8_t dev_id, enum rte_event_dev_xstats_mode mode, uint8_t queue_port_id, struct rte_event_dev_xstats_name *xstats_names, uint64_t *ids, unsigned int size)
int rte_event_port_setup(uint8_t dev_id, uint8_t port_id, const struct rte_event_port_conf *port_conf)
__rte_experimental int rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps)
int rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id, uint32_t *caps)
int rte_event_port_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[], uint16_t nb_unlinks)
__rte_experimental int rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[], uint16_t nb_unlinks, uint8_t profile_id)
#define RTE_EVENT_DEV_XSTATS_NAME_SIZE
int rte_event_dev_socket_id(uint8_t dev_id)
int rte_event_dev_configure(uint8_t dev_id, const struct rte_event_dev_config *dev_conf)
int rte_event_dev_close(uint8_t dev_id)
static uint16_t rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id, const struct rte_event ev[], uint16_t nb_events)
uint8_t nb_single_link_event_port_queues
uint32_t nb_event_port_enqueue_depth
uint32_t nb_event_queue_flows
uint32_t nb_event_port_dequeue_depth
uint8_t max_event_port_links
uint32_t max_event_port_enqueue_depth
uint32_t dequeue_timeout_ns
uint32_t min_dequeue_timeout_ns
uint32_t max_event_queue_flows
uint8_t max_event_port_dequeue_depth
uint8_t max_event_queue_priority_levels
uint8_t max_profiles_per_port
uint8_t max_event_priority_levels
const char * driver_name
uint32_t max_dequeue_timeout_ns
struct rte_device * dev
uint8_t max_single_link_event_port_queue_pairs
uint32_t nb_atomic_order_sequences
uint8_t priority
uint32_t flow_id
uint8_t rsvd
uint32_t event_type
struct rte_mbuf * mbuf
uint8_t queue_id
uint8_t sched_type
uint64_t u64
struct rte_event_vector * vec
uint8_t impl_opaque
uint32_t sub_event_type
void * event_ptr
char name[RTE_MEMPOOL_NAMESIZE]
uint32_t cache_size