[ena] Limit receive queue size to work around hardware bugs
Commit a801244 ("[ena] Increase receive ring size to 128 entries")
increased the receive ring size to 128 entries (while leaving the fill
level at 16), since using a smaller receive ring caused unexplained
failures on some instance types.

The original hardware bug that resulted in that commit seems to have
been fixed: experiments suggest that the original failure (observed on
a c6i.large instance in eu-west-2) will no longer reproduce when using
a receive ring containing only 16 entries (as was the case prior to
that commit).

Newer generations of the ENA hardware (observed on an m8i.large
instance in eu-south-2) seem to have a new and exciting hardware bug:
these instance types appear to use a hash of the received packet
header to determine which portion of the (out-of-order) receive ring
to use. If that portion of the ring happens to be empty (e.g. because
only 32 entries of the 128-entry ring are filled at any one time),
then the packet will be silently dropped.

Work around this new hardware bug by reducing the receive ring size
down to the current fill level of 32 entries. This appears to work on
all current instance types (but has not been exhaustively tested).

Signed-off-by: Michael Brown <mcb30@ipxe.org>
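As a rough illustration of the failure mode described above, the toy model
below splits a receive ring into equal hash-selected portions and drops any
packet whose portion contains no filled descriptor. It is only a sketch: the
portion count, the hash (modelled as a simple rotation), and the contiguous
fill pattern are assumptions for illustration, not documented ENA behaviour.

#include <stdio.h>

/* Assumed number of hash-selected ring portions (illustrative only) */
#define PORTIONS 4

/* Count how many of "packets" arrivals would be dropped, assuming
 * descriptors are filled contiguously from entry 0 and a per-packet
 * header hash (stand-in: simple rotation) selects one ring portion.
 */
static unsigned int count_drops ( unsigned int ring, unsigned int fill,
                                  unsigned int packets ) {
        unsigned int portion_len = ( ring / PORTIONS );
        unsigned int dropped = 0;
        unsigned int portion;
        unsigned int i;

        for ( i = 0 ; i < packets ; i++ ) {
                /* Hypothetical header hash selecting a ring portion */
                portion = ( i % PORTIONS );
                /* Dropped if the selected portion holds no filled entry */
                if ( ( portion * portion_len ) >= fill )
                        dropped++;
        }
        return dropped;
}

int main ( void ) {
        /* 128-entry ring filled to 32: three of four portions are empty */
        printf ( "ring=128 fill=32: %u/100 dropped\n",
                 count_drops ( 128, 32, 100 ) );
        /* 32-entry ring filled to 32: every portion holds descriptors */
        printf ( "ring=32 fill=32: %u/100 dropped\n",
                 count_drops ( 32, 32, 100 ) );
        return 0;
}

Under these assumptions the first case drops 75 of 100 packets and the second
drops none, matching the observed effect of shrinking the ring to the fill
level.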
--- a/src/drivers/net/ena.c
+++ b/src/drivers/net/ena.c
@@ -494,7 +494,7 @@ static int ena_create_sq ( struct ena_nic *ena, struct ena_sq *sq,
         sq->phase = ENA_SQE_PHASE;
 
         /* Calculate fill level */
-        sq->fill = sq->max;
+        sq->fill = sq->count;
         if ( sq->fill > cq->actual )
                 sq->fill = cq->actual;
 
@@ -1358,11 +1358,11 @@ static int ena_probe ( struct pci_device *pci ) {
         ena->acq.phase = ENA_ACQ_PHASE;
         ena_cq_init ( &ena->tx.cq, ENA_TX_COUNT,
                       sizeof ( ena->tx.cq.cqe.tx[0] ) );
-        ena_sq_init ( &ena->tx.sq, ENA_SQ_TX, ENA_TX_COUNT, ENA_TX_COUNT,
+        ena_sq_init ( &ena->tx.sq, ENA_SQ_TX, ENA_TX_COUNT,
                       sizeof ( ena->tx.sq.sqe.tx[0] ), ena->tx_ids );
         ena_cq_init ( &ena->rx.cq, ENA_RX_COUNT,
                       sizeof ( ena->rx.cq.cqe.rx[0] ) );
-        ena_sq_init ( &ena->rx.sq, ENA_SQ_RX, ENA_RX_COUNT, ENA_RX_FILL,
+        ena_sq_init ( &ena->rx.sq, ENA_SQ_RX, ENA_RX_COUNT,
                       sizeof ( ena->rx.sq.sqe.rx[0] ), ena->rx_ids );
 
         /* Fix up PCI device */
--- a/src/drivers/net/ena.h
+++ b/src/drivers/net/ena.h
@@ -37,10 +37,7 @@ FILE_LICENCE ( GPL2_OR_LATER_OR_UBDL );
 #define ENA_TX_COUNT 32
 
 /** Number of receive queue entries */
-#define ENA_RX_COUNT 128
-
-/** Receive queue maximum fill level */
-#define ENA_RX_FILL 32
+#define ENA_RX_COUNT 32
 
 /** Base address low register offset */
 #define ENA_BASE_LO 0x0
@@ -753,8 +750,6 @@ struct ena_sq {
         uint8_t direction;
         /** Number of entries */
         uint8_t count;
-        /** Maximum fill level */
-        uint8_t max;
         /** Fill level (limited to completion queue size) */
         uint8_t fill;
         /** Maximum inline header length */
@@ -767,19 +762,17 @@ struct ena_sq {
  * @v sq                Submission queue
  * @v direction         Direction
  * @v count             Number of entries
- * @v max               Maximum fill level
  * @v size              Size of each entry
  * @v ids               Buffer IDs
  */
 static inline __attribute__ (( always_inline )) void
 ena_sq_init ( struct ena_sq *sq, unsigned int direction, unsigned int count,
-              unsigned int max, size_t size, uint8_t *ids ) {
+              size_t size, uint8_t *ids ) {
 
         sq->len = ( count * size );
         sq->policy = ( ENA_SQ_HOST_MEMORY | ENA_SQ_CONTIGUOUS );
         sq->direction = direction;
         sq->count = count;
-        sq->max = max;
         sq->ids = ids;
 }
 