@@ -158,43 +158,68 @@ static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
 	napi_disable(&tqp_vector->napi);
 }
 
-static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
-				       u32 gl_value)
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+				 u32 rl_value)
 {
-	/* this defines the configuration for GL (Interrupt Gap Limiter)
-	 * GL defines inter interrupt gap.
-	 * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
-	 */
-	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
-	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
-	writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
-}
+	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
 
-static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
-				       u32 rl_value)
-{
 	/* this defines the configuration for RL (Interrupt Rate Limiter).
 	 * Rl defines rate of interrupts i.e. number of interrupts-per-second
 	 * GL and RL(Rate Limiter) are 2 ways to acheive interrupt coalescing
 	 */
-	writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+
+	if (rl_reg > 0 && !tqp_vector->tx_group.gl_adapt_enable &&
+	    !tqp_vector->rx_group.gl_adapt_enable)
+		/* According to the hardware, the range of rl_reg is
+		 * 0-59 and the unit is 4.
+		 */
+		rl_reg |= HNS3_INT_RL_ENABLE_MASK;
+
+	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+}
+
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+				    u32 gl_value)
+{
+	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
+}
+
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+				    u32 gl_value)
+{
+	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
 }
 
-static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
+static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
+				   struct hns3_nic_priv *priv)
 {
+	struct hnae3_handle *h = priv->ae_handle;
+
 	/* initialize the configuration for interrupt coalescing.
 	 * 1. GL (Interrupt Gap Limiter)
 	 * 2. RL (Interrupt Rate Limiter)
 	 */
 
-	/* Default :enable interrupt coalesce */
-	tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+	/* Default: enable interrupt coalescing self-adaptive and GL */
+	tqp_vector->tx_group.gl_adapt_enable = 1;
+	tqp_vector->rx_group.gl_adapt_enable = 1;
+
 	tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
-	hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
-	/* for now we are disabling Interrupt RL - we
-	 * will re-enable later
-	 */
-	hns3_set_vector_coalesc_rl(tqp_vector, 0);
+	tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
+
+	hns3_set_vector_coalesce_tx_gl(tqp_vector,
+				       tqp_vector->tx_group.int_gl);
+	hns3_set_vector_coalesce_rx_gl(tqp_vector,
+				       tqp_vector->rx_group.int_gl);
+
+	/* Default: disable RL */
+	h->kinfo.int_rl_setting = 0;
+	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
+
 	tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
 	tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
 }
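The new setters above rely on hns3_rl_usec_to_reg()/hns3_gl_usec_to_reg(), which are not part of this hunk. Below is a minimal standalone sketch of the conversion and enable-bit logic, assuming the 4 us register unit and 0-59 range stated in the RL comment; the mask value, the clamp, and the *_sketch names are illustrative assumptions, not taken from the driver headers.

#include <stdint.h>
#include <stdio.h>

#define HNS3_INT_RL_ENABLE_MASK_SKETCH	0x40u	/* assumption: enable bit position */
#define HNS3_INT_RL_MAX_USEC_SKETCH	236u	/* assumption: 59 register steps * 4 us */

/* Convert an RL value given in microseconds into register units of 4 us. */
static uint32_t hns3_rl_usec_to_reg_sketch(uint32_t rl_usec)
{
	if (rl_usec > HNS3_INT_RL_MAX_USEC_SKETCH)
		rl_usec = HNS3_INT_RL_MAX_USEC_SKETCH;
	return rl_usec >> 2;	/* 4 us per register step */
}

/* Mirror of the logic in hns3_set_vector_coalesce_rl(): only set the
 * enable bit when RL is non-zero and neither direction is self-adaptive.
 */
static uint32_t hns3_build_rl_reg_sketch(uint32_t rl_usec,
					 int tx_adapt, int rx_adapt)
{
	uint32_t rl_reg = hns3_rl_usec_to_reg_sketch(rl_usec);

	if (rl_reg > 0 && !tx_adapt && !rx_adapt)
		rl_reg |= HNS3_INT_RL_ENABLE_MASK_SKETCH;

	return rl_reg;
}

int main(void)
{
	/* 50 us with self-adaptive GL off in both directions */
	printf("rl_reg = 0x%x\n", hns3_build_rl_reg_sketch(50, 0, 0));
	return 0;
}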
@@ -1093,26 +1118,31 @@ static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
 static int hns3_nic_set_features(struct net_device *netdev,
 				 netdev_features_t features)
 {
+	netdev_features_t changed = netdev->features ^ features;
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	struct hnae3_handle *h = priv->ae_handle;
-	netdev_features_t changed;
 	int ret;
 
-	if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
-		priv->ops.fill_desc = hns3_fill_desc_tso;
-		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
-	} else {
-		priv->ops.fill_desc = hns3_fill_desc;
-		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
+		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+			priv->ops.fill_desc = hns3_fill_desc_tso;
+			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
+		} else {
+			priv->ops.fill_desc = hns3_fill_desc;
+			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
+		}
 	}
 
-	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
-		h->ae_algo->ops->enable_vlan_filter(h, true);
-	else
-		h->ae_algo->ops->enable_vlan_filter(h, false);
+	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
+	    h->ae_algo->ops->enable_vlan_filter) {
+		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+			h->ae_algo->ops->enable_vlan_filter(h, true);
+		else
+			h->ae_algo->ops->enable_vlan_filter(h, false);
+	}
 
-	changed = netdev->features ^ features;
-	if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
+	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
+	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
 		if (features & NETIF_F_HW_VLAN_CTAG_RX)
 			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
 		else
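The hunk above computes "changed = old features XOR new features" once and only reconfigures a block when its bit actually toggled, and it checks that the optional ae_algo callback exists before calling it. A generic sketch of that pattern follows; the feature bit, the fake_ops type, and the helper names are stand-ins for illustration, not the kernel's NETIF_F_* values or the driver's ops structure.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FEAT_VLAN_FILTER	(1u << 0)	/* stand-in feature bit */

struct fake_ops {
	/* optional callback that may be NULL, like enable_vlan_filter */
	void (*enable_vlan_filter)(void *handle, bool enable);
};

static void set_vlan_filter(void *handle, bool enable)
{
	(void)handle;
	printf("vlan filter -> %s\n", enable ? "on" : "off");
}

static void apply_features(uint32_t old_feats, uint32_t new_feats,
			   const struct fake_ops *ops, void *handle)
{
	uint32_t changed = old_feats ^ new_feats;

	/* Only touch the block when its bit actually toggled, and only
	 * when the optional callback is implemented.
	 */
	if ((changed & FEAT_VLAN_FILTER) && ops->enable_vlan_filter)
		ops->enable_vlan_filter(handle,
					!!(new_feats & FEAT_VLAN_FILTER));
}

int main(void)
{
	struct fake_ops ops = { .enable_vlan_filter = set_vlan_filter };

	apply_features(0, FEAT_VLAN_FILTER, &ops, NULL);	/* toggled: callback runs */
	apply_features(FEAT_VLAN_FILTER, FEAT_VLAN_FILTER, &ops, NULL);	/* unchanged: no call */
	return 0;
}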
@@ -2422,25 +2452,22 @@ static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
 
 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
 {
-	u16 rx_int_gl, tx_int_gl;
-	bool rx, tx;
-
-	rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
-	tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
-	rx_int_gl = tqp_vector->rx_group.int_gl;
-	tx_int_gl = tqp_vector->tx_group.int_gl;
-	if (rx && tx) {
-		if (rx_int_gl > tx_int_gl) {
-			tqp_vector->tx_group.int_gl = rx_int_gl;
-			tqp_vector->tx_group.flow_level =
-				tqp_vector->rx_group.flow_level;
-			hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
-		} else {
-			tqp_vector->rx_group.int_gl = tx_int_gl;
-			tqp_vector->rx_group.flow_level =
-				tqp_vector->tx_group.flow_level;
-			hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
-		}
+	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
+	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
+	bool rx_update, tx_update;
+
+	if (rx_group->gl_adapt_enable) {
+		rx_update = hns3_get_new_int_gl(rx_group);
+		if (rx_update)
+			hns3_set_vector_coalesce_rx_gl(tqp_vector,
+						       rx_group->int_gl);
+	}
+
+	if (tx_group->gl_adapt_enable) {
+		tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
+		if (tx_update)
+			hns3_set_vector_coalesce_tx_gl(tqp_vector,
+						       tx_group->int_gl);
 	}
 }
 
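With the rework above, each direction is updated independently and only when its own gl_adapt_enable flag is set, instead of forcing TX and RX to share the larger of the two GL values as the removed code did. The sketch below walks through that behavior with stubbed-out structures; group_sketch, pick_new_gl() and write_gl_reg() are stand-ins for the driver's ring-group state, hns3_get_new_int_gl() and the writel() to the GL0 (RX) / GL1 (TX) offsets, assumed here for illustration only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct group_sketch {
	bool gl_adapt_enable;	/* per-direction self-adaptive GL switch */
	uint32_t int_gl;	/* current GL value in microseconds */
};

/* Pretend the adaptive algorithm suggested a value; report if it changed. */
static bool pick_new_gl(struct group_sketch *g, uint32_t suggested)
{
	if (g->int_gl == suggested)
		return false;
	g->int_gl = suggested;
	return true;
}

static void write_gl_reg(int gl_idx, uint32_t usecs)
{
	/* stand-in for writel() to the GL0 (RX) or GL1 (TX) offset */
	printf("GL%d <- %u us\n", gl_idx, usecs);
}

static void update_gl_sketch(struct group_sketch *rx, struct group_sketch *tx,
			     uint32_t rx_suggested, uint32_t tx_suggested)
{
	/* Each direction is gated by its own self-adaptive flag and only
	 * rewritten when the suggested GL actually changed.
	 */
	if (rx->gl_adapt_enable && pick_new_gl(rx, rx_suggested))
		write_gl_reg(0, rx->int_gl);
	if (tx->gl_adapt_enable && pick_new_gl(tx, tx_suggested))
		write_gl_reg(1, tx->int_gl);
}

int main(void)
{
	struct group_sketch rx = { .gl_adapt_enable = true, .int_gl = 50 };
	struct group_sketch tx = { .gl_adapt_enable = false, .int_gl = 50 };

	update_gl_sketch(&rx, &tx, 20, 20);	/* only RX is rewritten */
	return 0;
}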
@@ -2501,6 +2528,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
 		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
 			     HNAE3_RING_TYPE_TX);
+		hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
 
 		cur_chain->next = NULL;
 
@@ -2516,6 +2545,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 			chain->tqp_index = tx_ring->tqp->tqp_index;
 			hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
 				     HNAE3_RING_TYPE_TX);
+			hnae_set_field(chain->int_gl_idx,
+				       HNAE3_RING_GL_IDX_M,
+				       HNAE3_RING_GL_IDX_S,
+				       HNAE3_RING_GL_TX);
 
 			cur_chain = chain;
 		}
@@ -2527,6 +2560,8 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
 		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
 			     HNAE3_RING_TYPE_RX);
+		hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
 
 		rx_ring = rx_ring->next;
 	}
@@ -2540,6 +2575,9 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 			chain->tqp_index = rx_ring->tqp->tqp_index;
 			hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
 				     HNAE3_RING_TYPE_RX);
+			hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+				       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+
 			cur_chain = chain;
 
 			rx_ring = rx_ring->next;
|
|
|
tqp_vector->rx_group.total_packets = 0;
|
|
|
tqp_vector->tx_group.total_bytes = 0;
|
|
|
tqp_vector->tx_group.total_packets = 0;
|
|
|
- hns3_vector_gl_rl_init(tqp_vector);
|
|
|
+ hns3_vector_gl_rl_init(tqp_vector, priv);
|
|
|
tqp_vector->handle = h;
|
|
|
|
|
|
ret = hns3_get_vector_ring_chain(tqp_vector,
|