mirror of
https://github.com/ipxe/ipxe
synced 2026-02-28 03:11:18 +03:00
[infiniband] Centralise assumption of 2048-byte payloads
IPoIB and the SMA have separate constants for the packet size to be used for I/O buffer allocations. Merge these into the single IB_MAX_PAYLOAD_SIZE constant. (Various other points in the Infiniband stack have hard-coded assumptions of a 2048-byte payload; we don't currently support variable MTUs.)
This commit is contained in:
@@ -345,6 +345,13 @@ int ib_post_recv ( struct ib_device *ibdev, struct ib_queue_pair *qp,
|
||||
struct io_buffer *iobuf ) {
|
||||
int rc;
|
||||
|
||||
/* Check packet length */
|
||||
if ( iob_tailroom ( iobuf ) < IB_MAX_PAYLOAD_SIZE ) {
|
||||
DBGC ( ibdev, "IBDEV %p QPN %#lx wrong RX buffer size (%zd)\n",
|
||||
ibdev, qp->qpn, iob_tailroom ( iobuf ) );
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Check queue fill level */
|
||||
if ( qp->recv.fill >= qp->recv.num_wqes ) {
|
||||
DBGC ( ibdev, "IBDEV %p QPN %#lx receive queue full\n",
|
||||
|
||||
@@ -40,7 +40,6 @@ FILE_LICENCE ( GPL2_OR_LATER );
|
||||
* @v cq_op Completion queue operations
|
||||
* @v num_send_wqes Number of send work queue entries
|
||||
* @v num_recv_wqes Number of receive work queue entries
|
||||
* @v recv_pkt_len Receive packet length
|
||||
* @v qkey Queue key
|
||||
* @ret rc Return status code
|
||||
*/
|
||||
@@ -48,7 +47,7 @@ int ib_create_qset ( struct ib_device *ibdev, struct ib_queue_set *qset,
|
||||
unsigned int num_cqes,
|
||||
struct ib_completion_queue_operations *cq_op,
|
||||
unsigned int num_send_wqes, unsigned int num_recv_wqes,
|
||||
size_t recv_pkt_len, unsigned long qkey ) {
|
||||
unsigned long qkey ) {
|
||||
int rc;
|
||||
|
||||
/* Sanity check */
|
||||
@@ -57,7 +56,6 @@ int ib_create_qset ( struct ib_device *ibdev, struct ib_queue_set *qset,
|
||||
|
||||
/* Store queue parameters */
|
||||
qset->recv_max_fill = num_recv_wqes;
|
||||
qset->recv_pkt_len = recv_pkt_len;
|
||||
|
||||
/* Allocate completion queue */
|
||||
qset->cq = ib_create_cq ( ibdev, num_cqes, cq_op );
|
||||
@@ -99,7 +97,7 @@ void ib_qset_refill_recv ( struct ib_device *ibdev,
|
||||
while ( qset->qp->recv.fill < qset->recv_max_fill ) {
|
||||
|
||||
/* Allocate I/O buffer */
|
||||
iobuf = alloc_iob ( qset->recv_pkt_len );
|
||||
iobuf = alloc_iob ( IB_MAX_PAYLOAD_SIZE );
|
||||
if ( ! iobuf ) {
|
||||
/* Non-fatal; we will refill on next attempt */
|
||||
return;
|
||||
|
||||
@@ -361,7 +361,7 @@ static void ib_sma_refill_recv ( struct ib_sma *sma ) {
|
||||
while ( sma->qp->recv.fill < IB_SMA_NUM_RECV_WQES ) {
|
||||
|
||||
/* Allocate I/O buffer */
|
||||
iobuf = alloc_iob ( IB_SMA_PAYLOAD_LEN );
|
||||
iobuf = alloc_iob ( IB_MAX_PAYLOAD_SIZE );
|
||||
if ( ! iobuf ) {
|
||||
/* Non-fatal; we will refill on next attempt */
|
||||
return;
|
||||
|
||||
Reference in New Issue
Block a user