sfc: allocate channels for XDP tx queues
Each CPU needs access to its own queue to allow uncontested
transmission of XDP_TX packets. This means we need to allocate (up
front) enough channels ("xdp transmit channels") to provide at least
one extra tx queue per CPU. These tx queues should not do TSO.
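As a rough worked example of the sizing this implies (a standalone sketch, not part of the commit; it assumes EFX_TXQ_TYPES, the number of tx queues sharing one event queue, is 4, and hard-codes the CPU count that the driver takes from num_possible_cpus()):

#include <stdio.h>

#define EFX_TXQ_TYPES 4	/* assumption: tx queues per event queue */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int n_cpus = 12;	/* stands in for num_possible_cpus() */
	int n_xdp_tx = n_cpus;	/* one uncontested tx queue per CPU */
	int n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);

	/* prints: 12 XDP tx queues across 3 extra channels */
	printf("%d XDP tx queues across %d extra channels\n",
	       n_xdp_tx, n_xdp_ev);
	return 0;
}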

Signed-off-by: Charles McLachlan <[email protected]>
Acked-by: Jesper Dangaard Brouer <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
cmclachl-xilinx authored and davem330 committed Oct 31, 2019
1 parent e45a4fe commit 3990a8f
Showing 4 changed files with 190 additions and 40 deletions.
14 changes: 9 additions & 5 deletions drivers/net/ethernet/sfc/ef10.c

@@ -946,8 +946,10 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
 		/* Extra channels, even those with TXQs (PTP), do not require
 		 * PIO resources.
 		 */
-		if (!channel->type->want_pio)
+		if (!channel->type->want_pio ||
+		    channel->channel >= efx->xdp_channel_offset)
 			continue;
+
 		efx_for_each_channel_tx_queue(tx_queue, channel) {
 			/* We assign the PIO buffers to queues in
 			 * reverse order to allow for the following
@@ -1296,8 +1298,9 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
 	int rc;
 
 	channel_vis = max(efx->n_channels,
-			  (efx->n_tx_channels + efx->n_extra_tx_channels) *
-			  EFX_TXQ_TYPES);
+			  ((efx->n_tx_channels + efx->n_extra_tx_channels) *
+			   EFX_TXQ_TYPES) +
+			  efx->n_xdp_channels * efx->xdp_tx_per_channel);
 
 #ifdef EFX_USE_PIO
 	/* Try to allocate PIO buffers if wanted and if the full
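For concreteness (illustrative numbers, not from the commit): with 8 TX channels, no extra TX channels, EFX_TXQ_TYPES == 4, and 3 XDP channels of 4 tx queues each, channel_vis becomes max(efx->n_channels, (8 + 0) * 4 + 3 * 4) = max(efx->n_channels, 44), so the XDP tx queues are now counted when sizing the VIs requested from the adapter.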
@@ -2434,11 +2437,12 @@ static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
 	/* TSOv2 is a limited resource that can only be configured on a limited
 	 * number of queues. TSO without checksum offload is not really a thing,
 	 * so we only enable it for those queues.
-	 * TSOv2 cannot be used with Hardware timestamping.
+	 * TSOv2 cannot be used with Hardware timestamping, and is never needed
+	 * for XDP tx.
 	 */
 	if (csum_offload && (nic_data->datapath_caps2 &
 	    (1 << MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN)) &&
-	    !tx_queue->timestamping) {
+	    !tx_queue->timestamping && !tx_queue->xdp_tx) {
 		tso_v2 = true;
 		netif_dbg(efx, hw, efx->net_dev, "Using TSOv2 for channel %u\n",
 			  channel->channel);
180 changes: 148 additions & 32 deletions drivers/net/ethernet/sfc/efx.c

@@ -583,9 +583,14 @@ efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
 	int number;
 
 	number = channel->channel;
-	if (efx->tx_channel_offset == 0) {
+
+	if (number >= efx->xdp_channel_offset &&
+	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
+		type = "-xdp";
+		number -= efx->xdp_channel_offset;
+	} else if (efx->tx_channel_offset == 0) {
 		type = "";
-	} else if (channel->channel < efx->tx_channel_offset) {
+	} else if (number < efx->tx_channel_offset) {
 		type = "-rx";
 	} else {
 		type = "-tx";
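Illustrative effect of the renaming (assuming the usual "%s%s-%d" name format and a device named eth0): combined RX/TX channels keep names like eth0-0 .. eth0-7, while the trailing XDP channels become eth0-xdp-0 .. eth0-xdp-2 rather than continuing the plain numbering, because the XDP test is applied before the rx/tx split.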
@@ -803,6 +808,8 @@ static void efx_remove_channels(struct efx_nic *efx)
 
 	efx_for_each_channel(channel, efx)
 		efx_remove_channel(channel);
+
+	kfree(efx->xdp_tx_queues);
 }
 
 int
@@ -1440,6 +1447,101 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
 	return count;
 }
 
+static int efx_allocate_msix_channels(struct efx_nic *efx,
+				      unsigned int max_channels,
+				      unsigned int extra_channels,
+				      unsigned int parallelism)
+{
+	unsigned int n_channels = parallelism;
+	int vec_count;
+	int n_xdp_tx;
+	int n_xdp_ev;
+
+	if (efx_separate_tx_channels)
+		n_channels *= 2;
+	n_channels += extra_channels;
+
+	/* To allow XDP transmit to happen from arbitrary NAPI contexts
+	 * we allocate a TX queue per CPU. We share event queues across
+	 * multiple tx queues, assuming tx and ev queues are both
+	 * maximum size.
+	 */
+
+	n_xdp_tx = num_possible_cpus();
+	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, EFX_TXQ_TYPES);
+
+	/* Check resources.
+	 * We need a channel per event queue, plus a VI per tx queue.
+	 * This may be more pessimistic than it needs to be.
+	 */
+	if (n_channels + n_xdp_ev > max_channels) {
+		netif_err(efx, drv, efx->net_dev,
+			  "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
+			  n_xdp_ev, n_channels, max_channels);
+		efx->n_xdp_channels = 0;
+		efx->xdp_tx_per_channel = 0;
+		efx->xdp_tx_queue_count = 0;
+	} else {
+		efx->n_xdp_channels = n_xdp_ev;
+		efx->xdp_tx_per_channel = EFX_TXQ_TYPES;
+		efx->xdp_tx_queue_count = n_xdp_tx;
+		n_channels += n_xdp_ev;
+		netif_dbg(efx, drv, efx->net_dev,
+			  "Allocating %d TX and %d event queues for XDP\n",
+			  n_xdp_tx, n_xdp_ev);
+	}
+
+	n_channels = min(n_channels, max_channels);
+
+	vec_count = pci_msix_vec_count(efx->pci_dev);
+	if (vec_count < 0)
+		return vec_count;
+	if (vec_count < n_channels) {
+		netif_err(efx, drv, efx->net_dev,
+			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
+			  vec_count, n_channels);
+		netif_err(efx, drv, efx->net_dev,
+			  "WARNING: Performance may be reduced.\n");
+		n_channels = vec_count;
+	}
+
+	efx->n_channels = n_channels;
+
+	/* Do not create the PTP TX queue(s) if PTP uses the MC directly. */
+	if (extra_channels && !efx_ptp_use_mac_tx_timestamps(efx))
+		n_channels--;
+
+	/* Ignore XDP tx channels when creating rx channels. */
+	n_channels -= efx->n_xdp_channels;
+
+	if (efx_separate_tx_channels) {
+		efx->n_tx_channels =
+			min(max(n_channels / 2, 1U),
+			    efx->max_tx_channels);
+		efx->tx_channel_offset =
+			n_channels - efx->n_tx_channels;
+		efx->n_rx_channels =
+			max(n_channels -
+			    efx->n_tx_channels, 1U);
+	} else {
+		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
+		efx->tx_channel_offset = 0;
+		efx->n_rx_channels = n_channels;
+	}
+
+	if (efx->n_xdp_channels)
+		efx->xdp_channel_offset = efx->tx_channel_offset +
+					  efx->n_tx_channels;
+	else
+		efx->xdp_channel_offset = efx->n_channels;
+
+	netif_dbg(efx, drv, efx->net_dev,
+		  "Allocating %u RX channels\n",
+		  efx->n_rx_channels);
+
+	return efx->n_channels;
+}
+
 /* Probe the number and type of interrupts we are able to obtain, and
  * the resulting numbers of channels and RX queues.
  */
@@ -1454,19 +1556,19 @@ static int efx_probe_interrupts(struct efx_nic *efx)
 		++extra_channels;
 
 	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
+		unsigned int parallelism = efx_wanted_parallelism(efx);
 		struct msix_entry xentries[EFX_MAX_CHANNELS];
 		unsigned int n_channels;
 
-		n_channels = efx_wanted_parallelism(efx);
-		if (efx_separate_tx_channels)
-			n_channels *= 2;
-		n_channels += extra_channels;
-		n_channels = min(n_channels, efx->max_channels);
-
-		for (i = 0; i < n_channels; i++)
-			xentries[i].entry = i;
-		rc = pci_enable_msix_range(efx->pci_dev,
-					   xentries, 1, n_channels);
+		rc = efx_allocate_msix_channels(efx, efx->max_channels,
+						extra_channels, parallelism);
+		if (rc >= 0) {
+			n_channels = rc;
+			for (i = 0; i < n_channels; i++)
+				xentries[i].entry = i;
+			rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
+						   n_channels);
+		}
 		if (rc < 0) {
 			/* Fall back to single channel MSI */
 			netif_err(efx, drv, efx->net_dev,
@@ -1485,21 +1587,6 @@
 		}
 
 		if (rc > 0) {
-			efx->n_channels = n_channels;
-			if (n_channels > extra_channels)
-				n_channels -= extra_channels;
-			if (efx_separate_tx_channels) {
-				efx->n_tx_channels = min(max(n_channels / 2,
-							     1U),
-							 efx->max_tx_channels);
-				efx->n_rx_channels = max(n_channels -
-							 efx->n_tx_channels,
-							 1U);
-			} else {
-				efx->n_tx_channels = min(n_channels,
-							 efx->max_tx_channels);
-				efx->n_rx_channels = n_channels;
-			}
 			for (i = 0; i < efx->n_channels; i++)
 				efx_get_channel(efx, i)->irq =
 					xentries[i].vector;
@@ -1511,6 +1598,8 @@
 		efx->n_channels = 1;
 		efx->n_rx_channels = 1;
 		efx->n_tx_channels = 1;
+		efx->n_xdp_channels = 0;
+		efx->xdp_channel_offset = efx->n_channels;
 		rc = pci_enable_msi(efx->pci_dev);
 		if (rc == 0) {
 			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
@@ -1529,12 +1618,14 @@
 		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
 		efx->n_rx_channels = 1;
 		efx->n_tx_channels = 1;
+		efx->n_xdp_channels = 0;
+		efx->xdp_channel_offset = efx->n_channels;
 		efx->legacy_irq = efx->pci_dev->irq;
 	}
 
-	/* Assign extra channels if possible */
+	/* Assign extra channels if possible, before XDP channels */
 	efx->n_extra_tx_channels = 0;
-	j = efx->n_channels;
+	j = efx->xdp_channel_offset;
 	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
 		if (!efx->extra_channel_type[i])
 			continue;
@@ -1729,29 +1820,50 @@ static void efx_remove_interrupts(struct efx_nic *efx)
 	efx->legacy_irq = 0;
 }
 
-static void efx_set_channels(struct efx_nic *efx)
+static int efx_set_channels(struct efx_nic *efx)
 {
 	struct efx_channel *channel;
 	struct efx_tx_queue *tx_queue;
+	int xdp_queue_number;
 
 	efx->tx_channel_offset =
 		efx_separate_tx_channels ?
 		efx->n_channels - efx->n_tx_channels : 0;
 
+	if (efx->xdp_tx_queue_count) {
+		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);
+
+		/* Allocate array for XDP TX queue lookup. */
+		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
+					     sizeof(*efx->xdp_tx_queues),
+					     GFP_KERNEL);
+		if (!efx->xdp_tx_queues)
+			return -ENOMEM;
+	}
+
 	/* We need to mark which channels really have RX and TX
 	 * queues, and adjust the TX queue numbers if we have separate
 	 * RX-only and TX-only channels.
 	 */
+	xdp_queue_number = 0;
 	efx_for_each_channel(channel, efx) {
 		if (channel->channel < efx->n_rx_channels)
 			channel->rx_queue.core_index = channel->channel;
 		else
 			channel->rx_queue.core_index = -1;
 
-		efx_for_each_channel_tx_queue(tx_queue, channel)
+		efx_for_each_channel_tx_queue(tx_queue, channel) {
 			tx_queue->queue -= (efx->tx_channel_offset *
 					    EFX_TXQ_TYPES);
+
+			if (efx_channel_is_xdp_tx(channel) &&
+			    xdp_queue_number < efx->xdp_tx_queue_count) {
+				efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+				xdp_queue_number++;
+			}
+		}
 	}
+	return 0;
 }
 
 static int efx_probe_nic(struct efx_nic *efx)
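The xdp_tx_queues[] array filled in here is what later lets the transmit path map a CPU straight to its own queue. A hedged sketch of such a consumer (the helper name and body are assumptions; the actual XDP transmit function arrives later in this series):

/* Hypothetical lookup helper, not part of this commit. Each CPU owning
 * exactly one XDP tx queue is what makes lockless XDP_TX possible.
 */
static struct efx_tx_queue *efx_xdp_pick_tx_queue(struct efx_nic *efx)
{
	unsigned int cpu = raw_smp_processor_id();

	if (unlikely(cpu >= efx->xdp_tx_queue_count))
		return NULL;	/* fewer queues than CPUs: no safe queue */
	return efx->xdp_tx_queues[cpu];
}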
@@ -1781,7 +1893,9 @@ static int efx_probe_nic(struct efx_nic *efx)
 	if (rc)
 		goto fail1;
 
-	efx_set_channels(efx);
+	rc = efx_set_channels(efx);
+	if (rc)
+		goto fail1;
 
 	/* dimension_resources can fail with EAGAIN */
 	rc = efx->type->dimension_resources(efx);
@@ -2091,6 +2205,8 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
 			channel->irq_moderation_us = rx_usecs;
 		else if (efx_channel_has_tx_queues(channel))
 			channel->irq_moderation_us = tx_usecs;
+		else if (efx_channel_is_xdp_tx(channel))
+			channel->irq_moderation_us = tx_usecs;
 	}
 
 	return 0;
[2 more changed files (33 additions & 3 deletions) not loaded]