Add RX quotas to the net device poll() method.

This avoids the problem of alloc_pkb() exhaustion when e.g. an
iSCSI-booted DOS session is left idle for a long time at the C:\
prompt and builds up a huge packet backlog.
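To make the idea concrete before reading the diff, here is a minimal
standalone sketch of a quota-bounded receive poll (plain C with
hypothetical names; an illustration of the concept, not code from this
tree). The key point is that the loop stops after rx_quota packets, so
an idle consumer cannot force unbounded packet-buffer allocation:

#include <stdio.h>

/* Hypothetical stand-in for "the NIC has a frame waiting" */
static int hw_frame_waiting ( void ) {
        return 1;       /* pretend traffic is always arriving */
}

/* Receive at most rx_quota frames per call.  Anything beyond the
 * quota stays in the NIC's own buffers until the next poll, instead
 * of being copied into freshly allocated packet buffers.
 */
static unsigned int poll_rx ( unsigned int rx_quota ) {
        unsigned int received = 0;

        while ( rx_quota && hw_frame_waiting() ) {
                /* ...allocate a buffer, copy the frame, enqueue it... */
                received++;
                rx_quota--;
        }
        return received;
}

int main ( void ) {
        printf ( "received %u frame(s)\n", poll_rx ( 1 ) );     /* prints 1 */
        return 0;
}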
@@ -126,16 +126,17 @@ void netdev_rx ( struct net_device *netdev, struct pk_buff *pkb ) {
  * Poll for packet on network device
  *
  * @v netdev      Network device
+ * @v rx_quota    Maximum number of packets to receive
  * @ret True      There are packets present in the receive queue
  * @ret False     There are no packets present in the receive queue
  *
  * Polls the network device for received packets.  Any received
  * packets will be added to the RX packet queue via netdev_rx().
  */
-int netdev_poll ( struct net_device *netdev ) {
+int netdev_poll ( struct net_device *netdev, unsigned int rx_quota ) {
 
         if ( netdev->state & NETDEV_OPEN )
-                netdev->poll ( netdev );
+                netdev->poll ( netdev, rx_quota );
 
         return ( ! list_empty ( &netdev->rx_queue ) );
 }
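With the signature change above, every driver's poll() method now takes
the quota and is expected to stop receiving once it is spent, while
still reaping TX completions on every call. As a rough sketch of what
that contract implies on the driver side (the mydriver_* helpers and
the buffer size are hypothetical; struct net_device, struct pk_buff,
alloc_pkb() and netdev_rx() are the names visible in this diff, assumed
here to carry their gPXE-era declarations):

/* Sketch of a driver poll method under the new contract */
static void mydriver_poll ( struct net_device *netdev,
                            unsigned int rx_quota ) {
        struct pk_buff *pkb;

        /* Receive at most rx_quota packets */
        while ( rx_quota && mydriver_rx_ready ( netdev ) ) {   /* hypothetical */
                pkb = alloc_pkb ( RX_BUF_SIZE );        /* hypothetical size */
                if ( ! pkb )
                        break;  /* allocation failed; retry on next poll */
                mydriver_rx_copy ( netdev, pkb );       /* hypothetical */
                netdev_rx ( netdev, pkb );
                rx_quota--;
        }

        /* TX completions are still handled regardless of the quota */
        mydriver_tx_complete ( netdev );        /* hypothetical */
}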
@@ -351,14 +352,9 @@ int net_rx ( struct pk_buff *pkb, struct net_device *netdev,
  *
  * @v process     Network stack process
  *
- * This polls all interfaces for any received packets, and processes
- * at most one packet from the RX queue.
+ * This polls all interfaces for received packets, and processes
+ * packets from the RX queue.
  *
- * We avoid processing all received packets, because processing the
- * received packet can trigger transmission of a new packet (e.g. an
- * ARP response).  Since TX completions will be processed as part of
- * the poll operation, it is easy to overflow small TX queues if
- * multiple packets are processed per poll.
  */
 static void net_step ( struct process *process ) {
         struct net_device *netdev;
@@ -367,10 +363,28 @@ static void net_step ( struct process *process ) {
         /* Poll and process each network device */
         list_for_each_entry ( netdev, &net_devices, list ) {
 
-                /* Poll for new packets */
-                netdev_poll ( netdev );
+                /* Poll for new packets.  Limit RX queue size to a
+                 * single packet, because otherwise most drivers are
+                 * in serious danger of running out of memory and
+                 * having to drop packets.
+                 *
+                 * This limitation isn't relevant to devices that
+                 * preallocate packet buffers (i.e. devices with
+                 * descriptor-based RX datapaths).  We might at some
+                 * point want to relax the quota for such devices.
+                 */
+                netdev_poll ( netdev,
+                              ( list_empty ( &netdev->rx_queue ) ? 1 : 0 ) );
 
-                /* Handle at most one received packet per poll */
+                /* Handle at most one received packet per poll.  We
+                 * avoid processing more than one packet per call to
+                 * netdev_poll(), because processing the received
+                 * packet can trigger transmission of a new packet
+                 * (e.g. an ARP response).  Since TX completions will
+                 * be processed as part of the poll operation, it is
+                 * easy to overflow small TX queues if multiple
+                 * packets are processed per poll.
+                 */
                 if ( ( pkb = netdev_rx_dequeue ( netdev ) ) ) {
                         DBGC ( netdev, "NETDEV %p processing %p\n",
                                netdev, pkb );
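Note how net_step() computes the quota: when the RX queue already holds
an unprocessed packet, netdev_poll() is still called (so TX completions
get reaped), but with a quota of 0, so no further packet buffer is
allocated until the pending packet has been consumed. A trivial
standalone rendering of that policy (hypothetical names):

#include <stdio.h>

/* The quota rule from the diff: admit a new packet only when the RX
 * queue is empty, so the queue never holds more than one packet.
 */
static unsigned int rx_quota_for ( int rx_queue_empty ) {
        return ( rx_queue_empty ? 1 : 0 );
}

int main ( void ) {
        printf ( "queue empty   -> quota %u\n", rx_quota_for ( 1 ) );   /* 1 */
        printf ( "packet queued -> quota %u\n", rx_quota_for ( 0 ) );   /* 0 */
        return 0;
}

Combined with the "at most one packet processed per step" rule, this
bounds both RX memory usage and the number of transmissions a single
poll can trigger.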