net: ethtool: extend ringparam set/get APIs for rx_push
Similar to what was done for TX_PUSH, add an RX_PUSH concept to the ethtool interfaces.

Signed-off-by: Shannon Nelson <shannon.nelson@amd.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 896de449f8
commit 5b4e9a7a71

5 changed files with 25 additions and 5 deletions
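Since the commit message is terse, here is a rough driver-side sketch of what the new knob plumbs through. This is illustrative only, not part of this commit; the ex_* names, fields, and the private struct are hypothetical, while the ethtool_ops callback signatures and supported_ring_params field are the real kernel API this patch extends.

/* Hypothetical driver sketch: advertise RX push support and carry the
 * new rx_push flag through the ringparam callbacks.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct ex_priv {
        u32 rx_ring_size;
        u8  rx_push_enabled;
};

static void ex_get_ringparam(struct net_device *netdev,
                             struct ethtool_ringparam *ring,
                             struct kernel_ethtool_ringparam *kring,
                             struct netlink_ext_ack *extack)
{
        struct ex_priv *priv = netdev_priv(netdev);

        ring->rx_pending = priv->rx_ring_size;
        kring->rx_push = priv->rx_push_enabled; /* reported as _RINGS_RX_PUSH */
}

static int ex_set_ringparam(struct net_device *netdev,
                            struct ethtool_ringparam *ring,
                            struct kernel_ethtool_ringparam *kring,
                            struct netlink_ext_ack *extack)
{
        struct ex_priv *priv = netdev_priv(netdev);

        /* kring->rx_push was filled in from ETHTOOL_A_RINGS_RX_PUSH */
        priv->rx_push_enabled = !!kring->rx_push;
        /* ...re-create rx queues in the chosen mode... */
        return 0;
}

static const struct ethtool_ops ex_ethtool_ops = {
        .supported_ring_params  = ETHTOOL_RING_USE_RX_PUSH,
        .get_ringparam          = ex_get_ringparam,
        .set_ringparam          = ex_set_ringparam,
};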
Documentation/networking/ethtool-netlink.rst
@@ -874,6 +874,7 @@ Kernel response contents:
   ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT``    u8      TCP header / data split
   ``ETHTOOL_A_RINGS_CQE_SIZE``          u32     Size of TX/RX CQE
   ``ETHTOOL_A_RINGS_TX_PUSH``           u8      flag of TX Push mode
+  ``ETHTOOL_A_RINGS_RX_PUSH``           u8      flag of RX Push mode
   ====================================  ======  ===========================
 
 ``ETHTOOL_A_RINGS_TCP_DATA_SPLIT`` indicates whether the device is usable with
@@ -883,8 +884,8 @@ separate buffers. The device configuration must make it possible to receive
 full memory pages of data, for example because MTU is high enough or through
 HW-GRO.
 
-``ETHTOOL_A_RINGS_TX_PUSH`` flag is used to enable descriptor fast
-path to send packets. In ordinary path, driver fills descriptors in DRAM and
+``ETHTOOL_A_RINGS_[RX|TX]_PUSH`` flag is used to enable descriptor fast
+path to send or receive packets. In ordinary path, driver fills descriptors in DRAM and
 notifies NIC hardware. In fast path, driver pushes descriptors to the device
 through MMIO writes, thus reducing the latency. However, enabling this feature
 may increase the CPU cost. Drivers may enforce additional per-packet
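The ordinary vs. fast path described in that paragraph can be made concrete with a hypothetical sketch; none of this is code from the commit, and the ex_* types are invented for the example. Only writel() and __iowrite64_copy() are real kernel primitives.

#include <linux/io.h>
#include <linux/types.h>

struct ex_desc {
        __le64 addr;
        __le32 len;
        __le32 flags;
};

struct ex_queue {
        struct ex_desc *ring;           /* descriptor ring in DRAM */
        u32 head;
        u32 count;                      /* power of two */
        void __iomem *doorbell;
        void __iomem *push_window;      /* device MMIO descriptor window */
};

/* Ordinary path: the descriptor lands in DRAM and the doorbell write
 * tells the NIC to DMA-fetch it later.
 */
static void ex_post_ordinary(struct ex_queue *q, const struct ex_desc *d)
{
        q->ring[q->head & (q->count - 1)] = *d;
        q->head++;
        writel(q->head, q->doorbell);
}

/* Push path: the whole descriptor is written into device MMIO space,
 * skipping the DMA fetch at the cost of more CPU/MMIO work per packet.
 */
static void ex_post_push(struct ex_queue *q, const struct ex_desc *d)
{
        __iowrite64_copy(q->push_window, d, sizeof(*d) / sizeof(u64));
}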
@@ -906,6 +907,7 @@ Request contents:
   ``ETHTOOL_A_RINGS_RX_BUF_LEN``        u32     size of buffers on the ring
   ``ETHTOOL_A_RINGS_CQE_SIZE``          u32     Size of TX/RX CQE
   ``ETHTOOL_A_RINGS_TX_PUSH``           u8      flag of TX Push mode
+  ``ETHTOOL_A_RINGS_RX_PUSH``           u8      flag of RX Push mode
   ====================================  ======  ===========================
 
 Kernel checks that requested ring sizes do not exceed limits reported by
include/linux/ethtool.h
@@ -73,12 +73,14 @@ enum {
  * @rx_buf_len: Current length of buffers on the rx ring.
  * @tcp_data_split: Scatter packet headers and data to separate buffers
  * @tx_push: The flag of tx push mode
+ * @rx_push: The flag of rx push mode
  * @cqe_size: Size of TX/RX completion queue event
  */
 struct kernel_ethtool_ringparam {
 	u32 rx_buf_len;
 	u8 tcp_data_split;
 	u8 tx_push;
+	u8 rx_push;
 	u32 cqe_size;
 };
 
@@ -87,11 +89,13 @@ struct kernel_ethtool_ringparam {
  * @ETHTOOL_RING_USE_RX_BUF_LEN: capture for setting rx_buf_len
  * @ETHTOOL_RING_USE_CQE_SIZE: capture for setting cqe_size
  * @ETHTOOL_RING_USE_TX_PUSH: capture for setting tx_push
+ * @ETHTOOL_RING_USE_RX_PUSH: capture for setting rx_push
  */
 enum ethtool_supported_ring_param {
 	ETHTOOL_RING_USE_RX_BUF_LEN = BIT(0),
 	ETHTOOL_RING_USE_CQE_SIZE   = BIT(1),
 	ETHTOOL_RING_USE_TX_PUSH    = BIT(2),
+	ETHTOOL_RING_USE_RX_PUSH    = BIT(3),
 };
 
 #define __ETH_RSS_HASH_BIT(bit)	((u32)1 << (bit))
include/uapi/linux/ethtool_netlink.h
@@ -356,6 +356,7 @@ enum {
 	ETHTOOL_A_RINGS_TCP_DATA_SPLIT,		/* u8 */
 	ETHTOOL_A_RINGS_CQE_SIZE,		/* u32 */
 	ETHTOOL_A_RINGS_TX_PUSH,		/* u8 */
+	ETHTOOL_A_RINGS_RX_PUSH,		/* u8 */
 
 	/* add new constants above here */
 	__ETHTOOL_A_RINGS_CNT,
net/ethtool/netlink.h
@@ -413,7 +413,7 @@ extern const struct nla_policy ethnl_features_set_policy[ETHTOOL_A_FEATURES_WANT
 extern const struct nla_policy ethnl_privflags_get_policy[ETHTOOL_A_PRIVFLAGS_HEADER + 1];
 extern const struct nla_policy ethnl_privflags_set_policy[ETHTOOL_A_PRIVFLAGS_FLAGS + 1];
 extern const struct nla_policy ethnl_rings_get_policy[ETHTOOL_A_RINGS_HEADER + 1];
-extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_TX_PUSH + 1];
+extern const struct nla_policy ethnl_rings_set_policy[ETHTOOL_A_RINGS_RX_PUSH + 1];
 extern const struct nla_policy ethnl_channels_get_policy[ETHTOOL_A_CHANNELS_HEADER + 1];
 extern const struct nla_policy ethnl_channels_set_policy[ETHTOOL_A_CHANNELS_COMBINED_COUNT + 1];
 extern const struct nla_policy ethnl_coalesce_get_policy[ETHTOOL_A_COALESCE_HEADER + 1];
net/ethtool/rings.c
@@ -56,7 +56,8 @@ static int rings_reply_size(const struct ethnl_req_info *req_base,
 	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_BUF_LEN */
 	       nla_total_size(sizeof(u8)) +	/* _RINGS_TCP_DATA_SPLIT */
 	       nla_total_size(sizeof(u32) +	/* _RINGS_CQE_SIZE */
-	       nla_total_size(sizeof(u8)));	/* _RINGS_TX_PUSH */
+	       nla_total_size(sizeof(u8)) +	/* _RINGS_TX_PUSH */
+	       nla_total_size(sizeof(u8)));	/* _RINGS_RX_PUSH */
 }
 
 static int rings_fill_reply(struct sk_buff *skb,
@@ -96,7 +97,8 @@ static int rings_fill_reply(struct sk_buff *skb,
 	      kr->tcp_data_split))) ||
 	    (kr->cqe_size &&
 	     (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
-	    nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push))
+	    nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) ||
+	    nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push))
 		return -EMSGSIZE;
 
 	return 0;
@@ -114,6 +116,7 @@ const struct nla_policy ethnl_rings_set_policy[] = {
 	[ETHTOOL_A_RINGS_RX_BUF_LEN]		= NLA_POLICY_MIN(NLA_U32, 1),
 	[ETHTOOL_A_RINGS_CQE_SIZE]		= NLA_POLICY_MIN(NLA_U32, 1),
 	[ETHTOOL_A_RINGS_TX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
+	[ETHTOOL_A_RINGS_RX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
 };
 
 static int
@@ -147,6 +150,14 @@ ethnl_set_rings_validate(struct ethnl_req_info *req_info,
 		return -EOPNOTSUPP;
 	}
 
+	if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&
+	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) {
+		NL_SET_ERR_MSG_ATTR(info->extack,
+				    tb[ETHTOOL_A_RINGS_RX_PUSH],
+				    "setting rx push not supported");
+		return -EOPNOTSUPP;
+	}
+
 	return ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP;
 }
 
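The added block mirrors the pre-existing TX_PUSH validation that ends just above it: a set request carrying ETHTOOL_A_RINGS_RX_PUSH against a driver that has not set ETHTOOL_RING_USE_RX_PUSH in supported_ring_params fails with -EOPNOTSUPP, and the extack string is surfaced to userspace; a recent ethtool binary would print it roughly as "netlink error: setting rx push not supported" (exact CLI output may vary by ethtool version).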
@@ -176,6 +187,8 @@ ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
 				tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
 	ethnl_update_u8(&kernel_ringparam.tx_push,
 			tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
+	ethnl_update_u8(&kernel_ringparam.rx_push,
+			tb[ETHTOOL_A_RINGS_RX_PUSH], &mod);
 	if (!mod)
 		return 0;
 
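For reference, the ethnl_update_u8() helper used above lives in net/ethtool/netlink.h; the sketch below paraphrases its behavior (treat the exact body as an approximation): it touches the destination and the mod flag only when the attribute is present and actually changes the value, which is why an unchanged rx_push setting lets the `if (!mod)` shortcut return early.

/* Paraphrase of ethnl_update_u8(): copy the u8 attribute into *dst only
 * when the attribute is present and differs from the current value, and
 * record that the request modified something.
 */
static inline void ethnl_update_u8(u8 *dst, const struct nlattr *attr,
				   bool *mod)
{
	u8 val;

	if (!attr)
		return;
	val = nla_get_u8(attr);
	if (*dst == val)
		return;

	*dst = val;
	*mod = true;
}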