#include <rte_compat.h>
#include "rte_bbdev_op.h"

#ifndef RTE_BBDEV_MAX_DEVS
#define RTE_BBDEV_MAX_DEVS 128  /**< Max number of devices */
#endif

/** Number of enqueue status values tracked in per-queue statistics. */
#define RTE_BBDEV_ENQ_STATUS_SIZE_MAX 6

/** Iterate over all valid bbdev device IDs. */
#define RTE_BBDEV_FOREACH(i) for (i = rte_bbdev_find_next(-1); \
		i < RTE_BBDEV_MAX_DEVS; \
		i = rte_bbdev_find_next(i))
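/*
 * Usage sketch (illustrative, not part of this header): iterate over all
 * probed bbdev devices with RTE_BBDEV_FOREACH() and query each one through
 * rte_bbdev_info_get(), declared further down in this file. Assumes <stdio.h>.
 */
static inline void
example_list_bbdevs(void)
{
	uint16_t dev_id;
	struct rte_bbdev_info info;

	RTE_BBDEV_FOREACH(dev_id) {
		if (rte_bbdev_info_get(dev_id, &info) == 0)
			printf("bbdev %u: %s (socket %d)\n",
					dev_id, info.dev_name, info.socket_id);
	}
}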
/** Macro used at the end of a PMD's list of bbdev operation capabilities. */
#define RTE_BBDEV_END_OF_CAPABILITIES_LIST() \
	{ RTE_BBDEV_OP_NONE }
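/*
 * Illustrative sketch of how a PMD terminates its capability table with the
 * macro above. The table name and entry contents are hypothetical; a real
 * driver fills in the op-specific capability structures from rte_bbdev_op.h.
 */
static const struct rte_bbdev_op_cap example_pmd_capabilities[] = {
	{
		.type = RTE_BBDEV_OP_TURBO_DEC,
		/* .cap.turbo_dec = { ...driver-specific flags and limits... } */
	},
	RTE_BBDEV_END_OF_CAPABILITIES_LIST()
};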
/** Structure associated with each queue. */
struct rte_bbdev_queue_data {
	void *queue_private;  /**< Driver-specific queue data */
	struct rte_bbdev_queue_conf conf;  /**< Current configuration */
	struct rte_bbdev_stats queue_stats;  /**< Queue statistics */
	enum rte_bbdev_enqueue_status enqueue_status;  /**< Enqueue status when op is rejected */
	bool started;  /**< Queue state */
};

/** @internal Driver enqueue functions, one per operation type. */
typedef uint16_t (*rte_bbdev_enqueue_enc_ops_t)(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops);
typedef uint16_t (*rte_bbdev_enqueue_dec_ops_t)(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops);
typedef uint16_t (*rte_bbdev_enqueue_fft_ops_t)(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_fft_op **ops, uint16_t num_ops);
typedef uint16_t (*rte_bbdev_enqueue_mldts_ops_t)(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_mldts_op **ops, uint16_t num_ops);

/** @internal Driver dequeue functions, one per operation type. */
typedef uint16_t (*rte_bbdev_dequeue_enc_ops_t)(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops);
typedef uint16_t (*rte_bbdev_dequeue_dec_ops_t)(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops);
typedef uint16_t (*rte_bbdev_dequeue_fft_ops_t)(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_fft_op **ops, uint16_t num_ops);
typedef uint16_t (*rte_bbdev_dequeue_mldts_ops_t)(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_mldts_op **ops, uint16_t num_ops);
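/*
 * Driver-side sketch (illustrative, not part of this header): a function
 * matching rte_bbdev_enqueue_enc_ops_t. A real PMD would translate ops[] into
 * hardware or software-queue descriptors; all names here are hypothetical.
 * RTE_SET_USED() comes from rte_common.h.
 */
static uint16_t
example_pmd_enqueue_enc(struct rte_bbdev_queue_data *q_data,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	/* A real driver posts ops[] to its queue; this sketch only counts them. */
	RTE_SET_USED(ops);
	q_data->queue_stats.enqueued_count += num_ops;
	return num_ops;
}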
#define RTE_BBDEV_NAME_MAX_LEN  64  /**< Max length of device name */

/** @internal The data associated with a device, safe to share across processes. */
struct rte_bbdev_data {
	char name[RTE_BBDEV_NAME_MAX_LEN];  /**< Unique identifier name */
	void *dev_private;  /**< Driver-specific private data */
	uint16_t num_queues;  /**< Number of currently configured queues */
	struct rte_bbdev_queue_data *queues;  /**< Queue structures */
	uint16_t dev_id;  /**< Device ID */
	int socket_id;  /**< NUMA socket that device is on */
	bool started;  /**< Device state: STARTED(1)/STOPPED(0) */
	RTE_ATOMIC(uint16_t) process_cnt;  /**< Counter of processes using the device */
};

/* Forward declarations. */
struct rte_bbdev_callback;
struct rte_intr_handle;

/** Structure to keep track of registered callbacks. */
RTE_TAILQ_HEAD(rte_bbdev_cb_list, rte_bbdev_callback);

/** @internal The data structure associated with a device. */
struct __rte_cache_aligned rte_bbdev {
	rte_bbdev_enqueue_enc_ops_t enqueue_enc_ops;  /**< Enqueue encode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_dec_ops;  /**< Enqueue decode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_enc_ops;  /**< Dequeue encode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_dec_ops;  /**< Dequeue decode function */
	rte_bbdev_enqueue_enc_ops_t enqueue_ldpc_enc_ops;  /**< Enqueue LDPC encode function */
	rte_bbdev_enqueue_dec_ops_t enqueue_ldpc_dec_ops;  /**< Enqueue LDPC decode function */
	rte_bbdev_dequeue_enc_ops_t dequeue_ldpc_enc_ops;  /**< Dequeue LDPC encode function */
	rte_bbdev_dequeue_dec_ops_t dequeue_ldpc_dec_ops;  /**< Dequeue LDPC decode function */
	rte_bbdev_enqueue_fft_ops_t enqueue_fft_ops;  /**< Enqueue FFT function */
	rte_bbdev_dequeue_fft_ops_t dequeue_fft_ops;  /**< Dequeue FFT function */
	struct rte_bbdev_data *data;  /**< Pointer to device data */
	struct rte_device *device;  /**< Backing device */
	struct rte_bbdev_cb_list list_cbs;  /**< User application callbacks */
	struct rte_intr_handle *intr_handle;  /**< Device interrupt handle */
	rte_bbdev_enqueue_mldts_ops_t enqueue_mldts_ops;  /**< Enqueue MLD-TS function */
	rte_bbdev_dequeue_mldts_ops_t dequeue_mldts_ops;  /**< Dequeue MLD-TS function */
};

/** @internal Array of all devices. */
extern struct rte_bbdev rte_bbdev_devices[];
static inline uint16_t
rte_bbdev_enqueue_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_enc_ops(q_data, ops, num_ops);
}
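/*
 * Usage sketch (illustrative, not part of this header): enqueue a burst of
 * encode operations on device 0, queue 0. Assumes the ops come from a mempool
 * created with rte_bbdev_op_pool_create() and that the application has already
 * filled in their inputs/outputs; the burst size of 16 is hypothetical.
 */
static inline void
example_enqueue_enc_burst(struct rte_mempool *enc_op_pool)
{
	struct rte_bbdev_enc_op *ops[16];
	uint16_t num_enq;

	if (rte_bbdev_enc_op_alloc_bulk(enc_op_pool, ops, 16) != 0)
		return;

	/* ... set up ops[i]->turbo_enc (or ldpc_enc) parameters and mbufs ... */

	num_enq = rte_bbdev_enqueue_enc_ops(0, 0, ops, 16);
	/* num_enq may be < 16 if the queue is full; retry or dequeue first. */
	if (num_enq < 16)
		rte_bbdev_enc_op_free_bulk(&ops[num_enq], 16 - num_enq);
}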
static inline uint16_t
rte_bbdev_enqueue_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_dec_ops(q_data, ops, num_ops);
}
static inline uint16_t
rte_bbdev_enqueue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_ldpc_enc_ops(q_data, ops, num_ops);
}
static inline uint16_t
rte_bbdev_enqueue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_ldpc_dec_ops(q_data, ops, num_ops);
}
static inline uint16_t
rte_bbdev_enqueue_fft_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_fft_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_fft_ops(q_data, ops, num_ops);
}
static inline uint16_t
rte_bbdev_enqueue_mldts_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_mldts_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->enqueue_mldts_ops(q_data, ops, num_ops);
}
static inline uint16_t
rte_bbdev_dequeue_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_enc_ops(q_data, ops, num_ops);
}
static inline uint16_t
rte_bbdev_dequeue_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_dec_ops(q_data, ops, num_ops);
}
static inline uint16_t
rte_bbdev_dequeue_ldpc_enc_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_enc_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_ldpc_enc_ops(q_data, ops, num_ops);
}
static inline uint16_t
rte_bbdev_dequeue_ldpc_dec_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_ldpc_dec_ops(q_data, ops, num_ops);
}
static inline uint16_t
rte_bbdev_dequeue_fft_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_fft_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_fft_ops(q_data, ops, num_ops);
}
__rte_experimental static inline uint16_t
rte_bbdev_dequeue_mldts_ops(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_mldts_op **ops, uint16_t num_ops)
{
	struct rte_bbdev *dev = &rte_bbdev_devices[dev_id];
	struct rte_bbdev_queue_data *q_data = &dev->data->queues[queue_id];
	return dev->dequeue_mldts_ops(q_data, ops, num_ops);
}
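/*
 * Usage sketch (illustrative, not part of this header): poll a queue until a
 * previously enqueued burst of decode operations completes, check the per-op
 * status, then return the ops to their mempool. Assumes <stdio.h>; the burst
 * size of 16 is hypothetical.
 */
static inline void
example_dequeue_dec_burst(uint16_t dev_id, uint16_t queue_id)
{
	struct rte_bbdev_dec_op *ops[16];
	uint16_t num_deq = 0, i;

	while (num_deq < 16)
		num_deq += rte_bbdev_dequeue_dec_ops(dev_id, queue_id,
				&ops[num_deq], 16 - num_deq);

	for (i = 0; i < num_deq; i++)
		if (ops[i]->status != 0)
			printf("op %u finished with status %d\n", i, ops[i]->status);

	rte_bbdev_dec_op_free_bulk(ops, num_deq);
}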
/*
 * Public API declared in this header.
 */

/* Device discovery and lifecycle. */
uint16_t rte_bbdev_count(void);
bool rte_bbdev_is_valid(uint16_t dev_id);
uint16_t rte_bbdev_find_next(uint16_t dev_id);
int rte_bbdev_setup_queues(uint16_t dev_id, uint16_t num_queues, int socket_id);
int rte_bbdev_intr_enable(uint16_t dev_id);
int rte_bbdev_start(uint16_t dev_id);
int rte_bbdev_stop(uint16_t dev_id);
int rte_bbdev_close(uint16_t dev_id);
int rte_bbdev_info_get(uint16_t dev_id, struct rte_bbdev_info *dev_info);

/* Queue configuration, control and interrupts. */
int rte_bbdev_queue_configure(uint16_t dev_id, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf);
int rte_bbdev_queue_start(uint16_t dev_id, uint16_t queue_id);
int rte_bbdev_queue_stop(uint16_t dev_id, uint16_t queue_id);
int rte_bbdev_queue_info_get(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_queue_info *queue_info);
int rte_bbdev_queue_intr_enable(uint16_t dev_id, uint16_t queue_id);
int rte_bbdev_queue_intr_disable(uint16_t dev_id, uint16_t queue_id);
int rte_bbdev_queue_intr_ctl(uint16_t dev_id, uint16_t queue_id, int epfd, int op,
		void *data);

/* Statistics. */
int rte_bbdev_stats_get(uint16_t dev_id, struct rte_bbdev_stats *stats);
int rte_bbdev_stats_reset(uint16_t dev_id);

/* Event callbacks. */
typedef void (*rte_bbdev_cb_fn)(uint16_t dev_id, enum rte_bbdev_event_type event,
		void *cb_arg, void *ret_param);
int rte_bbdev_callback_register(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);
int rte_bbdev_callback_unregister(uint16_t dev_id, enum rte_bbdev_event_type event,
		rte_bbdev_cb_fn cb_fn, void *cb_arg);

/* Status-to-string helpers. */
const char *rte_bbdev_device_status_str(enum rte_bbdev_device_status status);
const char *rte_bbdev_enqueue_status_str(enum rte_bbdev_enqueue_status status);

/*
 * Enumeration values referenced in this header include:
 * - enum rte_bbdev_device_status: RTE_BBDEV_DEV_NOT_SUPPORTED,
 *   RTE_BBDEV_DEV_CONFIGURED, RTE_BBDEV_DEV_FATAL_ERR, RTE_BBDEV_DEV_CORRECT_ERR,
 *   RTE_BBDEV_DEV_RESTART_REQ, RTE_BBDEV_DEV_RECONFIG_REQ.
 * - enum rte_bbdev_enqueue_status: RTE_BBDEV_ENQ_STATUS_NONE,
 *   RTE_BBDEV_ENQ_STATUS_QUEUE_FULL, RTE_BBDEV_ENQ_STATUS_RING_FULL,
 *   RTE_BBDEV_ENQ_STATUS_INVALID_OP.
 * - enum rte_bbdev_event_type: RTE_BBDEV_EVENT_UNKNOWN, RTE_BBDEV_EVENT_DEQUEUE.
 *
 * The fast-path burst functions (rte_bbdev_enqueue_enc_ops(),
 * rte_bbdev_dequeue_enc_ops() and their dec/ldpc/fft/mldts variants) are the
 * static inline functions defined earlier in this header; the MLD-TS variants
 * are marked __rte_experimental.
 */
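/*
 * Setup sketch (illustrative, not part of this header): typical bring-up
 * order for one device using the functions listed above. Error handling is
 * reduced to early returns; the single queue and the LDPC decode op type are
 * hypothetical choices.
 */
static inline int
example_bbdev_setup(uint16_t dev_id)
{
	struct rte_bbdev_info info;
	struct rte_bbdev_queue_conf qconf;

	if (rte_bbdev_info_get(dev_id, &info) != 0)
		return -1;

	/* Allocate one queue on the device's own NUMA socket. */
	if (rte_bbdev_setup_queues(dev_id, 1, info.socket_id) != 0)
		return -1;

	/* Start from the driver's default queue configuration. */
	qconf = info.drv.default_queue_conf;
	qconf.op_type = RTE_BBDEV_OP_LDPC_DEC;

	if (rte_bbdev_queue_configure(dev_id, 0, &qconf) != 0)
		return -1;

	return rte_bbdev_start(dev_id);
}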
/*
 * Fields documented in this header, grouped by structure:
 *
 * struct rte_bbdev_driver_info (filled in by rte_bbdev_info_get()):
 *	bool hardware_accelerated;
 *	unsigned int max_num_queues;
 *	unsigned int num_queues[RTE_BBDEV_OP_TYPE_SIZE_MAX];
 *	unsigned int queue_priority[RTE_BBDEV_OP_TYPE_SIZE_MAX];
 *	uint8_t max_ul_queue_priority;
 *	uint8_t max_dl_queue_priority;
 *	bool queue_intr_supported;
 *	uint32_t harq_buffer_size;
 *	uint16_t *fft_window_width;
 *	enum rte_bbdev_device_status device_status;
 *	struct rte_bbdev_queue_conf default_queue_conf;
 *	const struct rte_bbdev_op_cap *capabilities;
 *	enum rte_cpu_flag_t *cpu_flag_reqs;
 *
 * struct rte_bbdev_info:
 *	const struct rte_device *device;
 *	struct rte_bbdev_driver_info drv;
 *
 * struct rte_bbdev_queue_conf / struct rte_bbdev_queue_info:
 *	enum rte_bbdev_op_type op_type;    (queue_conf)
 *	struct rte_bbdev_queue_conf conf;  (queue_info)
 *
 * struct rte_bbdev_stats:
 *	uint64_t enqueue_err_count;
 *	uint64_t enqueue_warn_count;
 *	uint64_t dequeue_err_count;
 *	uint64_t dequeue_warn_count;
 *	uint64_t enqueue_status_count[RTE_BBDEV_ENQ_STATUS_SIZE_MAX];
 *	uint64_t acc_offload_cycles;
 *
 * Related macros: RTE_BBDEV_NAME_MAX_LEN (defined above) and
 * __rte_cache_aligned (from rte_common.h).
 */
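/*
 * Stats sketch (illustrative, not part of this header): read the counters
 * listed above after some traffic has been processed, then clear them.
 * Assumes <stdio.h> and <inttypes.h> for printf()/PRIu64.
 */
static inline void
example_print_bbdev_stats(uint16_t dev_id)
{
	struct rte_bbdev_stats stats;

	if (rte_bbdev_stats_get(dev_id, &stats) != 0)
		return;

	printf("enqueue errors:   %" PRIu64 "\n", stats.enqueue_err_count);
	printf("enqueue warnings: %" PRIu64 "\n", stats.enqueue_warn_count);
	printf("dequeue errors:   %" PRIu64 "\n", stats.dequeue_err_count);
	printf("dequeue warnings: %" PRIu64 "\n", stats.dequeue_warn_count);
	printf("offload cycles:   %" PRIu64 "\n", stats.acc_offload_cycles);

	rte_bbdev_stats_reset(dev_id);
}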