#include <dev_backend.h>

static _enso_always_inline void try_clflush([[maybe_unused]] void* addr) {
#ifdef __CLFLUSHOPT__
  _mm_clflushopt(addr);
#endif
}
// Initializes the notification buffer pair.
int notification_buf_init(uint32_t bdf, int32_t bar,
                          struct NotificationBufPair* notification_buf_pair,
                          const std::string& huge_page_prefix) {
  DevBackend* fpga_dev = DevBackend::Create(bdf, bar);
  if (unlikely(fpga_dev == nullptr)) {
    std::cerr << "Could not create device" << std::endl;
    return -1;
  }
  notification_buf_pair->fpga_dev = fpga_dev;

  int notif_pipe_id = fpga_dev->AllocateNotifBuf();

  if (notif_pipe_id < 0) {
    std::cerr << "Could not allocate notification buffer" << std::endl;
    return -1;
  }

  notification_buf_pair->id = notif_pipe_id;
  void* uio_mmap_bar2_addr =
      fpga_dev->uio_mmap((1 << 12) * (kMaxNbFlows + kMaxNbApps), 2);
  if (uio_mmap_bar2_addr == MAP_FAILED) {
    std::cerr << "Could not get mmap uio memory!" << std::endl;
    return -1;
  }

  notification_buf_pair->uio_mmap_bar2_addr = uio_mmap_bar2_addr;
  // Notification buffer registers come after the Enso Pipe registers in
  // BAR 2, hence the kMaxNbFlows offset.
  volatile struct QueueRegs* notification_buf_pair_regs =
      (struct QueueRegs*)((uint8_t*)uio_mmap_bar2_addr +
                          (notif_pipe_id + kMaxNbFlows) * kMemorySpacePerQueue);
  // Make sure the notification buffer is disabled and that head and tail
  // start at zero before enabling it.
  DevBackend::mmio_write32(&notification_buf_pair_regs->rx_mem_low, 0);
  DevBackend::mmio_write32(&notification_buf_pair_regs->rx_mem_high, 0);
  while (DevBackend::mmio_read32(&notification_buf_pair_regs->rx_mem_low) != 0)
    continue;
  while (DevBackend::mmio_read32(&notification_buf_pair_regs->rx_mem_high) !=
         0)
    continue;

  DevBackend::mmio_write32(&notification_buf_pair_regs->rx_tail, 0);
  while (DevBackend::mmio_read32(&notification_buf_pair_regs->rx_tail) != 0)
    continue;

  DevBackend::mmio_write32(&notification_buf_pair_regs->rx_head, 0);
  while (DevBackend::mmio_read32(&notification_buf_pair_regs->rx_head) != 0)
    continue;
  std::string huge_page_path = huge_page_prefix +
                               std::string(kHugePageNotifBufPathPrefix) +
                               std::to_string(notification_buf_pair->id);

  notification_buf_pair->regs = (struct QueueRegs*)notification_buf_pair_regs;
  notification_buf_pair->rx_buf =
      (struct RxNotification*)get_huge_page(huge_page_path);
  if (notification_buf_pair->rx_buf == NULL) {
    std::cerr << "Could not get huge page" << std::endl;
    return -1;
  }
  memset(notification_buf_pair->rx_buf, 0, kNotificationBufSize * 64);

  // Use the first half of the huge page for RX notifications and the second
  // half for TX notifications.
  notification_buf_pair->tx_buf =
      (struct TxNotification*)((uint64_t)notification_buf_pair->rx_buf +
                               kAlignedDscBufPairSize / 2);

  memset(notification_buf_pair->tx_buf, 0, kNotificationBufSize * 64);
  uint64_t phys_addr =
      fpga_dev->ConvertVirtAddrToDevAddr(notification_buf_pair->rx_buf);

  notification_buf_pair->rx_head_ptr =
      (uint32_t*)&notification_buf_pair_regs->rx_head;
  notification_buf_pair->tx_tail_ptr =
      (uint32_t*)&notification_buf_pair_regs->tx_tail;

  notification_buf_pair->rx_head =
      DevBackend::mmio_read32(notification_buf_pair->rx_head_ptr);

  // Preserve the current TX tail and make the TX head match it.
  notification_buf_pair->tx_tail =
      DevBackend::mmio_read32(notification_buf_pair->tx_tail_ptr);
  notification_buf_pair->tx_head = notification_buf_pair->tx_tail;

  DevBackend::mmio_write32(&notification_buf_pair_regs->tx_head,
                           notification_buf_pair->tx_head);
  notification_buf_pair->pending_rx_pipe_tails = (uint32_t*)malloc(
      sizeof(*(notification_buf_pair->pending_rx_pipe_tails)) * kMaxNbFlows);
  if (notification_buf_pair->pending_rx_pipe_tails == NULL) {
    std::cerr << "Could not allocate memory" << std::endl;
    return -1;
  }
  memset(notification_buf_pair->pending_rx_pipe_tails, 0,
         sizeof(*(notification_buf_pair->pending_rx_pipe_tails)) *
             kMaxNbFlows);
  notification_buf_pair->wrap_tracker =
      (uint8_t*)malloc(kNotificationBufSize / 8);
  if (notification_buf_pair->wrap_tracker == NULL) {
    std::cerr << "Could not allocate memory" << std::endl;
    return -1;
  }
  memset(notification_buf_pair->wrap_tracker, 0, kNotificationBufSize / 8);
  notification_buf_pair->next_rx_pipe_ids =
      (enso_pipe_id_t*)malloc(kNotificationBufSize * sizeof(enso_pipe_id_t));
  if (notification_buf_pair->next_rx_pipe_ids == NULL) {
    std::cerr << "Could not allocate memory" << std::endl;
    return -1;
  }
  notification_buf_pair->next_rx_ids_head = 0;
  notification_buf_pair->next_rx_ids_tail = 0;
  notification_buf_pair->tx_full_cnt = 0;
  notification_buf_pair->nb_unreported_completions = 0;
  notification_buf_pair->huge_page_prefix = huge_page_prefix;
  // Setting the addresses enables the queues; do this last.
  DevBackend::mmio_write32(&notification_buf_pair_regs->rx_mem_low,
                           (uint32_t)phys_addr);
  DevBackend::mmio_write32(&notification_buf_pair_regs->rx_mem_high,
                           (uint32_t)(phys_addr >> 32));

  phys_addr += kAlignedDscBufPairSize / 2;  // Physical address of the TX buf.

  DevBackend::mmio_write32(&notification_buf_pair_regs->tx_mem_low,
                           (uint32_t)phys_addr);
  DevBackend::mmio_write32(&notification_buf_pair_regs->tx_mem_high,
                           (uint32_t)(phys_addr >> 32));

  return 0;
}
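// Illustrative usage sketch (not part of the original source; the huge page
// mount point is an assumption, and `bdf`/`bar` stand for the PCIe device
// identifiers used above):
//
//   struct NotificationBufPair notif_buf_pair = {};
//   if (notification_buf_init(bdf, bar, &notif_buf_pair, "/mnt/huge/") != 0) {
//     // Device could not be created or no notification buffer was available.
//   }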
// Initializes an Enso Pipe.
int enso_pipe_init(struct RxEnsoPipeInternal* enso_pipe,
                   struct NotificationBufPair* notification_buf_pair,
                   bool fallback) {
  void* uio_mmap_bar2_addr = notification_buf_pair->uio_mmap_bar2_addr;
  DevBackend* fpga_dev =
      static_cast<DevBackend*>(notification_buf_pair->fpga_dev);

  int enso_pipe_id = fpga_dev->AllocatePipe(fallback);

  if (enso_pipe_id < 0) {
    std::cerr << "Could not allocate pipe" << std::endl;
    return -1;
  }
  volatile struct QueueRegs* enso_pipe_regs =
      (struct QueueRegs*)((uint8_t*)uio_mmap_bar2_addr +
                          enso_pipe_id * kMemorySpacePerQueue);
  enso_pipe->regs = (struct QueueRegs*)enso_pipe_regs;
  // Make sure the pipe is disabled and that head and tail start at zero.
  DevBackend::mmio_write32(&enso_pipe_regs->rx_mem_low, 0);
  DevBackend::mmio_write32(&enso_pipe_regs->rx_mem_high, 0);
  while (DevBackend::mmio_read32(&enso_pipe_regs->rx_mem_low) != 0 ||
         DevBackend::mmio_read32(&enso_pipe_regs->rx_mem_high) != 0)
    continue;

  DevBackend::mmio_write32(&enso_pipe_regs->rx_tail, 0);
  while (DevBackend::mmio_read32(&enso_pipe_regs->rx_tail) != 0) continue;

  DevBackend::mmio_write32(&enso_pipe_regs->rx_head, 0);
  while (DevBackend::mmio_read32(&enso_pipe_regs->rx_head) != 0) continue;
  std::string huge_page_path = notification_buf_pair->huge_page_prefix +
                               std::string(kHugePageRxPipePathPrefix) +
                               std::to_string(enso_pipe_id);

  enso_pipe->buf = (uint32_t*)get_huge_page(huge_page_path, 0, true);
  if (enso_pipe->buf == NULL) {
    std::cerr << "Could not get huge page" << std::endl;
    return -1;
  }
  uint64_t phys_addr = fpga_dev->ConvertVirtAddrToDevAddr(enso_pipe->buf);

  enso_pipe->buf_phys_addr = phys_addr;
  enso_pipe->phys_buf_offset = phys_addr - (uint64_t)(enso_pipe->buf);

  enso_pipe->id = enso_pipe_id;
  enso_pipe->buf_head_ptr = (uint32_t*)&enso_pipe_regs->rx_head;
  enso_pipe->rx_head = 0;
  enso_pipe->rx_tail = 0;
  enso_pipe->huge_page_prefix = notification_buf_pair->huge_page_prefix;
  notification_buf_pair->pending_rx_pipe_tails[enso_pipe->id] =
      enso_pipe->rx_tail;

  // Setting the address enables the queue; do this last. The least
  // significant bits of rx_mem_low carry the ID of the notification buffer
  // associated with this pipe, hence the `+ notification_buf_pair->id`.
  DevBackend::mmio_write32(&enso_pipe_regs->rx_mem_low,
                           (uint32_t)phys_addr + notification_buf_pair->id);
  DevBackend::mmio_write32(&enso_pipe_regs->rx_mem_high,
                           (uint32_t)(phys_addr >> 32));

  return enso_pipe_id;
}
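// Worked example of the rx_mem_low encoding above (illustrative values; the
// exact field widths are an assumption based on the buffer alignment): since
// the pipe buffer is huge-page aligned, the low bits of `phys_addr` are zero
// and hardware can unpack both values from one register:
//
//   phys_addr   = 0x1'4020'0000  (2 MB-aligned buffer)
//   notif ID    = 3
//   rx_mem_low  = 0x4020'0003    (low 32 address bits | notif buffer ID)
//   rx_mem_high = 0x0000'0001    (high 32 address bits)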
// Initializes an Enso Pipe and the notification buffer, if needed.
int dma_init(struct NotificationBufPair* notification_buf_pair,
             struct RxEnsoPipeInternal* enso_pipe, uint32_t bdf, int32_t bar,
             const std::string& huge_page_prefix, bool fallback) {
  printf("Running with NOTIFICATION_BUF_SIZE: %u\n", kNotificationBufSize);
  printf("Running with ENSO_PIPE_SIZE: %u\n", kEnsoPipeSize);

  int16_t core_id = sched_getcpu();
  if (core_id < 0) {
    std::cerr << "Could not get CPU id" << std::endl;
    return -1;
  }
  // Initialize the notification buffer only for the first pipe.
  if (notification_buf_pair->ref_cnt == 0) {
    int ret = notification_buf_init(bdf, bar, notification_buf_pair,
                                    huge_page_prefix);
    if (ret != 0) {
      return ret;
    }
  }

  ++(notification_buf_pair->ref_cnt);

  return enso_pipe_init(enso_pipe, notification_buf_pair, fallback);
}
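// Illustrative call sequence (a sketch, not from the original source):
// dma_init() reference-counts the notification buffer, so multiple pipes in
// the same process can share one buffer pair:
//
//   struct NotificationBufPair notif_buf_pair = {};
//   struct RxEnsoPipeInternal pipe0 = {};
//   struct RxEnsoPipeInternal pipe1 = {};
//   dma_init(&notif_buf_pair, &pipe0, bdf, bar, "/mnt/huge/", false);
//   dma_init(&notif_buf_pair, &pipe1, bdf, bar, "/mnt/huge/", false);
//
// The first call initializes both the notification buffer and pipe0; the
// second initializes only pipe1.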
static _enso_always_inline uint16_t
__get_new_tails(struct NotificationBufPair* notification_buf_pair) {
  struct RxNotification* notification_buf = notification_buf_pair->rx_buf;
  uint32_t notification_buf_head = notification_buf_pair->rx_head;
  uint16_t nb_consumed_notifications = 0;

  uint16_t next_rx_ids_tail = notification_buf_pair->next_rx_ids_tail;
  for (uint16_t i = 0; i < kBatchSize; ++i) {
    struct RxNotification* cur_notification =
        notification_buf + notification_buf_head;

    // A zero signal means hardware has not filled this slot yet.
    if (cur_notification->signal == 0) {
      break;
    }

    cur_notification->signal = 0;
    notification_buf_head = (notification_buf_head + 1) % kNotificationBufSize;
    enso_pipe_id_t enso_pipe_id = cur_notification->queue_id;
    notification_buf_pair->pending_rx_pipe_tails[enso_pipe_id] =
        (uint32_t)cur_notification->tail;

    notification_buf_pair->next_rx_pipe_ids[next_rx_ids_tail] = enso_pipe_id;
    next_rx_ids_tail = (next_rx_ids_tail + 1) % kNotificationBufSize;

    ++nb_consumed_notifications;
  }
  notification_buf_pair->next_rx_ids_tail = next_rx_ids_tail;

  if (likely(nb_consumed_notifications > 0)) {
    // Send the new head to hardware.
    DevBackend::mmio_write32(notification_buf_pair->rx_head_ptr,
                             notification_buf_head);
    notification_buf_pair->rx_head = notification_buf_head;
  }

  return nb_consumed_notifications;
}

// Gets the latest tails for the pipes associated with the given notification
// buffer.
uint16_t get_new_tails(struct NotificationBufPair* notification_buf_pair) {
  return __get_new_tails(notification_buf_pair);
}
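// Ring-index sketch for __get_new_tails() (illustrative; assumes
// kNotificationBufSize == 16384): a head at the last slot wraps back to zero,
//
//   notification_buf_head = (16383 + 1) % 16384;  // == 0
//
// The `signal` field acts as the ownership flag: hardware sets it when it
// writes a notification and software clears it when consuming, so a zero
// signal marks the first slot with no pending notification.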
static _enso_always_inline uint32_t
__consume_queue(struct RxEnsoPipeInternal* enso_pipe,
                struct NotificationBufPair* notification_buf_pair, void** buf,
                bool peek = false) {
  uint32_t* enso_pipe_buf = enso_pipe->buf;
  uint32_t enso_pipe_head = enso_pipe->rx_tail;
  int queue_id = enso_pipe->id;
  *buf = &enso_pipe_buf[enso_pipe_head * 16];

  uint32_t enso_pipe_tail =
      notification_buf_pair->pending_rx_pipe_tails[queue_id];

  if (enso_pipe_tail == enso_pipe_head) {
    return 0;
  }

  uint32_t flit_aligned_size =
      ((enso_pipe_tail - enso_pipe_head) % ENSO_PIPE_SIZE) * 64;

  if (!peek) {
    enso_pipe_head =
        (enso_pipe_head + flit_aligned_size / 64) % ENSO_PIPE_SIZE;
    enso_pipe->rx_tail = enso_pipe_head;
  }

  return flit_aligned_size;
}
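// Worked example of the flit arithmetic above (illustrative): a flit is a
// 64-byte cache line and `enso_pipe_buf` is indexed as uint32_t, so one flit
// spans 16 entries (hence `enso_pipe_head * 16`). With head == 10 and
// tail == 14:
//
//   flit_aligned_size = ((14 - 10) % ENSO_PIPE_SIZE) * 64;  // 256 bytes
//
// Because the subtraction is unsigned and ENSO_PIPE_SIZE is a power of two,
// the modulo also yields the right count when the tail has wrapped around.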
// Gets the next batch of data from the given Enso Pipe.
uint32_t get_next_batch_from_queue(
    struct RxEnsoPipeInternal* enso_pipe,
    struct NotificationBufPair* notification_buf_pair, void** buf) {
  return __consume_queue(enso_pipe, notification_buf_pair, buf);
}
// Same as get_next_batch_from_queue() but without consuming the data, so the
// next call to get_next_batch_from_queue() returns the same batch.
uint32_t peek_next_batch_from_queue(
    struct RxEnsoPipeInternal* enso_pipe,
    struct NotificationBufPair* notification_buf_pair, void** buf) {
  return __consume_queue(enso_pipe, notification_buf_pair, buf, true);
}
static _enso_always_inline int32_t
__get_next_enso_pipe_id(struct NotificationBufPair* notification_buf_pair) {
  uint16_t next_rx_ids_head = notification_buf_pair->next_rx_ids_head;
  uint16_t next_rx_ids_tail = notification_buf_pair->next_rx_ids_tail;

  if (next_rx_ids_head == next_rx_ids_tail) {
    uint16_t nb_consumed_notifications =
        __get_new_tails(notification_buf_pair);
    if (unlikely(nb_consumed_notifications == 0)) {
      return -1;
    }
  }

  enso_pipe_id_t enso_pipe_id =
      notification_buf_pair->next_rx_pipe_ids[next_rx_ids_head];

  notification_buf_pair->next_rx_ids_head =
      (next_rx_ids_head + 1) % kNotificationBufSize;

  return enso_pipe_id;
}
// Gets the next Enso Pipe with pending data.
int32_t get_next_enso_pipe_id(
    struct NotificationBufPair* notification_buf_pair) {
  return __get_next_enso_pipe_id(notification_buf_pair);
}
// Gets the next batch of data from the next available Enso Pipe.
uint32_t get_next_batch(struct NotificationBufPair* notification_buf_pair,
                        struct SocketInternal* socket_entries,
                        int* enso_pipe_id, void** buf) {
  int32_t __enso_pipe_id = __get_next_enso_pipe_id(notification_buf_pair);

  if (unlikely(__enso_pipe_id == -1)) {
    return 0;
  }

  *enso_pipe_id = __enso_pipe_id;

  struct SocketInternal* socket_entry = &socket_entries[__enso_pipe_id];
  struct RxEnsoPipeInternal* enso_pipe = &socket_entry->enso_pipe;

  return __consume_queue(enso_pipe, notification_buf_pair, buf);
}
// Frees the next `len` bytes in the pipe's buffer.
void advance_pipe(struct RxEnsoPipeInternal* enso_pipe, size_t len) {
  uint32_t rx_pkt_head = enso_pipe->rx_head;
  uint32_t nb_flits = ((uint64_t)len - 1) / 64 + 1;
  rx_pkt_head = (rx_pkt_head + nb_flits) % ENSO_PIPE_SIZE;

  DevBackend::mmio_write32(enso_pipe->buf_head_ptr, rx_pkt_head);
  enso_pipe->rx_head = rx_pkt_head;
}
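// Round-up sketch for advance_pipe() (illustrative): nb_flits is
// ceil(len / 64), so releasing a 100-byte packet frees two whole flits:
//
//   nb_flits = ((uint64_t)100 - 1) / 64 + 1;  // == 2
//
// advance_pipe(pipe, 100) therefore advances rx_head by two flit slots and
// lets the NIC reuse those 128 bytes.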
// Frees all the received bytes in the pipe's buffer.
void fully_advance_pipe(struct RxEnsoPipeInternal* enso_pipe) {
  DevBackend::mmio_write32(enso_pipe->buf_head_ptr, enso_pipe->rx_tail);
  enso_pipe->rx_head = enso_pipe->rx_tail;
}
// Prefetches a given Enso Pipe.
void prefetch_pipe(struct RxEnsoPipeInternal* enso_pipe) {
  DevBackend::mmio_write32(enso_pipe->buf_head_ptr, enso_pipe->rx_head);
}
static _enso_always_inline uint32_t
__send_to_queue(struct NotificationBufPair* notification_buf_pair,
                uint64_t phys_addr, uint32_t len) {
  struct TxNotification* tx_buf = notification_buf_pair->tx_buf;
  uint32_t tx_tail = notification_buf_pair->tx_tail;
  uint32_t missing_bytes = len;

  uint64_t transf_addr = phys_addr;
  uint64_t hugepage_mask = ~((uint64_t)kBufPageSize - 1);
  uint64_t hugepage_base_addr = transf_addr & hugepage_mask;
  uint64_t hugepage_boundary = hugepage_base_addr + kBufPageSize;
  while (missing_bytes > 0) {
    uint32_t free_slots =
        (notification_buf_pair->tx_head - tx_tail - 1) % kNotificationBufSize;

    // Block until the notification buffer has room for one more entry.
    while (unlikely(free_slots == 0)) {
      ++notification_buf_pair->tx_full_cnt;
      update_tx_head(notification_buf_pair);
      free_slots =
          (notification_buf_pair->tx_head - tx_tail - 1) %
          kNotificationBufSize;
    }
    struct TxNotification* tx_notification = tx_buf + tx_tail;
    uint32_t req_length = std::min(missing_bytes, (uint32_t)kMaxTransferLen);
    uint32_t missing_bytes_in_page = hugepage_boundary - transf_addr;
    req_length = std::min(req_length, missing_bytes_in_page);

    // If the transmission is split among multiple notifications, mark this
    // one as non-final by setting its bit in the wrap tracker.
    uint8_t wrap_tracker_mask = (missing_bytes > req_length) << (tx_tail & 0x7);
    notification_buf_pair->wrap_tracker[tx_tail / 8] |= wrap_tracker_mask;
    tx_notification->length = req_length;
    tx_notification->signal = 1;
    tx_notification->phys_addr = transf_addr;

    // Advance within the huge page, wrapping back to its base if needed.
    uint64_t huge_page_offset = (transf_addr + req_length) % kBufPageSize;
    transf_addr = hugepage_base_addr + huge_page_offset;

    tx_tail = (tx_tail + 1) % kNotificationBufSize;
    missing_bytes -= req_length;
  }

  notification_buf_pair->tx_tail = tx_tail;
  DevBackend::mmio_write32(notification_buf_pair->tx_tail_ptr, tx_tail);

  return len;
}
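// Transfer-splitting sketch (illustrative; the 2 MB value for kBufPageSize is
// an assumption): a request is split whenever it exceeds kMaxTransferLen or
// would cross a huge-page boundary. With kBufPageSize == 2 MB and transf_addr
// starting 1 MB into the page, a 1.5 MB send becomes two notifications:
//
//   1st: phys_addr = base + 1 MB, length = 1 MB   (up to the page boundary)
//   2nd: phys_addr = base,        length = 0.5 MB (offset wraps to the base)
//
// The wrap tracker bit set on the first notification makes the pair count as
// a single completion in update_tx_head().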
// Sends data through a given queue.
uint32_t send_to_queue(struct NotificationBufPair* notification_buf_pair,
                       uint64_t phys_addr, uint32_t len) {
  return __send_to_queue(notification_buf_pair, phys_addr, len);
}
// Returns the number of transmission requests completed since the last call
// to this function.
uint32_t get_unreported_completions(
    struct NotificationBufPair* notification_buf_pair) {
  uint32_t completions;
  update_tx_head(notification_buf_pair);
  completions = notification_buf_pair->nb_unreported_completions;
  notification_buf_pair->nb_unreported_completions = 0;

  return completions;
}
// Updates the TX head and the number of TX completions.
void update_tx_head(struct NotificationBufPair* notification_buf_pair) {
  struct TxNotification* tx_buf = notification_buf_pair->tx_buf;
  uint32_t head = notification_buf_pair->tx_head;
  uint32_t tail = notification_buf_pair->tx_tail;

  if (head == tail) {
    return;
  }

  // Advance the head over notifications that hardware already consumed.
  for (uint16_t i = 0; i < kBatchSize; ++i) {
    if (head == tail) {
      break;
    }

    struct TxNotification* tx_notification = tx_buf + head;

    // A nonzero signal means hardware has not consumed this entry yet.
    if (tx_notification->signal != 0) {
      break;
    }

    // Requests split across multiple notifications should count as a single
    // completion, so only the final notification of a request increments
    // `nb_unreported_completions`.
    uint8_t wrap_tracker_mask = 1 << (head & 0x7);
    uint8_t no_wrap =
        !(notification_buf_pair->wrap_tracker[head / 8] & wrap_tracker_mask);
    notification_buf_pair->nb_unreported_completions += no_wrap;
    notification_buf_pair->wrap_tracker[head / 8] &= ~wrap_tracker_mask;

    head = (head + 1) % kNotificationBufSize;
  }

  notification_buf_pair->tx_head = head;
}
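// Bit-layout sketch for the wrap tracker (illustrative): one bit per TX
// notification slot, eight slots per byte, so slot 13 maps to byte 1, bit 5:
//
//   notification_buf_pair->wrap_tracker[13 / 8]  // byte index 1
//   1 << (13 & 0x7)                              // mask 0b0010'0000
//
// A set bit marks a non-final chunk of a split request, so only the last
// notification of each request increments `nb_unreported_completions`.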
// Sends a configuration notification to the NIC.
int send_config(struct NotificationBufPair* notification_buf_pair,
                struct TxNotification* config_notification) {
  struct TxNotification* tx_buf = notification_buf_pair->tx_buf;
  uint32_t tx_tail = notification_buf_pair->tx_tail;
  uint32_t free_slots =
      (notification_buf_pair->tx_head - tx_tail - 1) % kNotificationBufSize;

  // Make sure it is a config notification.
  if (config_notification->signal < 2) {
    return -1;
  }

  // Block until the notification buffer has room for one more entry.
  while (unlikely(free_slots == 0)) {
    ++notification_buf_pair->tx_full_cnt;
    update_tx_head(notification_buf_pair);
    free_slots =
        (notification_buf_pair->tx_head - tx_tail - 1) % kNotificationBufSize;
  }

  struct TxNotification* tx_notification = tx_buf + tx_tail;
  *tx_notification = *config_notification;

  tx_tail = (tx_tail + 1) % kNotificationBufSize;
  notification_buf_pair->tx_tail = tx_tail;
  DevBackend::mmio_write32(notification_buf_pair->tx_tail_ptr, tx_tail);
  // Wait for the config notification to be consumed, then restore the
  // completion counter so that config notifications are not reported as data
  // completions.
  uint32_t nb_unreported_completions =
      notification_buf_pair->nb_unreported_completions;
  while (notification_buf_pair->nb_unreported_completions ==
         nb_unreported_completions) {
    update_tx_head(notification_buf_pair);
  }
  notification_buf_pair->nb_unreported_completions = nb_unreported_completions;

  return 0;
}
// Gets the number of fallback queues currently in use.
int get_nb_fallback_queues(struct NotificationBufPair* notification_buf_pair) {
  DevBackend* fpga_dev =
      static_cast<DevBackend*>(notification_buf_pair->fpga_dev);
  return fpga_dev->GetNbFallbackQueues();
}
// Sets the round-robin status for the device.
int set_round_robin_status(struct NotificationBufPair* notification_buf_pair,
                           bool round_robin) {
  DevBackend* fpga_dev =
      static_cast<DevBackend*>(notification_buf_pair->fpga_dev);
  return fpga_dev->SetRrStatus(round_robin);
}
// Gets the round-robin status for the device.
int get_round_robin_status(struct NotificationBufPair* notification_buf_pair) {
  DevBackend* fpga_dev =
      static_cast<DevBackend*>(notification_buf_pair->fpga_dev);
  return fpga_dev->GetRrStatus();
}
// Converts an address in the application's virtual address space to an
// address that can be used by the device.
uint64_t get_dev_addr_from_virt_addr(
    struct NotificationBufPair* notification_buf_pair, void* virt_addr) {
  DevBackend* fpga_dev =
      static_cast<DevBackend*>(notification_buf_pair->fpga_dev);
  uint64_t dev_addr = fpga_dev->ConvertVirtAddrToDevAddr(virt_addr);
  return dev_addr;
}
// Frees the notification buffer pair.
void notification_buf_free(struct NotificationBufPair* notification_buf_pair) {
  DevBackend* fpga_dev =
      static_cast<DevBackend*>(notification_buf_pair->fpga_dev);

  fpga_dev->FreeNotifBuf(notification_buf_pair->id);

  // Disable the queues before freeing the backing memory.
  DevBackend::mmio_write32(&notification_buf_pair->regs->rx_mem_low, 0);
  DevBackend::mmio_write32(&notification_buf_pair->regs->rx_mem_high, 0);
  DevBackend::mmio_write32(&notification_buf_pair->regs->tx_mem_low, 0);
  DevBackend::mmio_write32(&notification_buf_pair->regs->tx_mem_high, 0);
  munmap(notification_buf_pair->rx_buf, kAlignedDscBufPairSize);

  std::string huge_page_path = notification_buf_pair->huge_page_prefix +
                               std::string(kHugePageNotifBufPathPrefix) +
                               std::to_string(notification_buf_pair->id);

  unlink(huge_page_path.c_str());

  free(notification_buf_pair->pending_rx_pipe_tails);
  free(notification_buf_pair->wrap_tracker);
  free(notification_buf_pair->next_rx_pipe_ids);
}
// Frees the Enso Pipe.
void enso_pipe_free(struct NotificationBufPair* notification_buf_pair,
                    struct RxEnsoPipeInternal* enso_pipe,
                    enso_pipe_id_t enso_pipe_id) {
  DevBackend* fpga_dev =
      static_cast<DevBackend*>(notification_buf_pair->fpga_dev);

  // Disable the pipe before freeing its buffer.
  DevBackend::mmio_write32(&enso_pipe->regs->rx_mem_low, 0);
  DevBackend::mmio_write32(&enso_pipe->regs->rx_mem_high, 0);
  if (enso_pipe->buf) {
    munmap(enso_pipe->buf, kBufPageSize);
    std::string huge_page_path = enso_pipe->huge_page_prefix +
                                 std::string(kHugePageRxPipePathPrefix) +
                                 std::to_string(enso_pipe_id);
    unlink(huge_page_path.c_str());
    enso_pipe->buf = nullptr;
  }

  fpga_dev->FreePipe(enso_pipe_id);
}
// Frees the notification buffer and all pipes.
int dma_finish(struct SocketInternal* socket_entry) {
  struct NotificationBufPair* notification_buf_pair =
      socket_entry->notification_buf_pair;
  struct RxEnsoPipeInternal* enso_pipe = &socket_entry->enso_pipe;

  enso_pipe_id_t enso_pipe_id = enso_pipe->id;

  if (notification_buf_pair->ref_cnt == 0) {
    return -1;
  }

  enso_pipe_free(notification_buf_pair, enso_pipe, enso_pipe_id);

  // Free the notification buffer when the last pipe that uses it goes away.
  if (notification_buf_pair->ref_cnt == 1) {
    notification_buf_free(notification_buf_pair);
  }

  --(notification_buf_pair->ref_cnt);

  return 0;
}
// Gets the Enso Pipe ID associated with a given socket.
uint32_t get_enso_pipe_id_from_socket(struct SocketInternal* socket_entry) {
  return (uint32_t)socket_entry->enso_pipe.id;
}
// Prints statistics for a given socket.
void print_stats(struct SocketInternal* socket_entry, bool print_global) {
  struct NotificationBufPair* notification_buf_pair =
      socket_entry->notification_buf_pair;

  if (print_global) {
    printf("TX notification buffer full counter: %lu\n\n",
           notification_buf_pair->tx_full_cnt);
    printf("Dsc RX head: %u\n", notification_buf_pair->rx_head);
    printf("Dsc TX tail: %u\n", notification_buf_pair->tx_tail);
    printf("Dsc TX head: %u\n\n", notification_buf_pair->tx_head);
  }

  printf("Pkt RX tail: %u\n", socket_entry->enso_pipe.rx_tail);
  printf("Pkt RX head: %u\n", socket_entry->enso_pipe.rx_head);
}