Merge branch 'cxgb4-ch_ktls-updates-in-net-next'

Rohit Maheshwari says:

====================
cxgb4/ch_ktls: updates in net-next

This series of patches improves connection setup and statistics.

This series is broken down as follows:

Patch 1 fixes the handling of connection setup failure in HW. The driver
shouldn't return success to tls_dev_add until HW returns success.

Patch 2 avoids flooding the kernel log by lowering the log level of
frequent messages.

Patch 3 adds kTLS statistics at the port level.

v1->v2:
- removed conn_up from all places.

v2->v3:
- Corrected timeout handling.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit: 902c2a3168
Parents: 2c956a5ad4 3427e13e5a
Author: David S. Miller <davem@davemloft.net> 2020-09-29 13:57:03 -07:00
6 files changed, 235 insertions(+), 203 deletions(-)
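
Before the per-file diffs, a rough sketch of the flow Patch 1 introduces may help. The old driver returned success from tls_dev_add optimistically and tracked a connection state machine; the new code blocks on a completion until hardware replies. The following is a condensed, hypothetical rendering of the chcr_ktls_dev_add() rework shown below — ktls_open_sketch() does not exist in the driver, and the real code additionally takes tx_info->lock and defers cleanup via pending_close when the wait times out:

	/* Sketch only: condensed from the chcr_ktls_dev_add() rework below. */
	static int ktls_open_sketch(struct chcr_ktls_info *tx_info, struct sock *sk)
	{
		init_completion(&tx_info->completion);
		tx_info->open_state = CH_KTLS_OPEN_PENDING;

		/* fire the connection request (CPL_ACT_OPEN_REQ) towards HW */
		if (chcr_setup_connection(sk, tx_info))
			return -1;

		/* the CPL reply handler records open_state and calls complete() */
		wait_for_completion_timeout(&tx_info->completion, 30 * HZ);

		/* only a confirmed reply counts as success for tls_dev_add */
		return tx_info->open_state == CH_KTLS_OPEN_SUCCESS ? 0 : -1;
	}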


@@ -3527,6 +3527,10 @@ DEFINE_SHOW_ATTRIBUTE(meminfo);
 static int chcr_stats_show(struct seq_file *seq, void *v)
 {
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+	struct ch_ktls_port_stats_debug *ktls_port;
+	int i = 0;
+#endif
 	struct adapter *adap = seq->private;
 
 	seq_puts(seq, "Chelsio Crypto Accelerator Stats \n");
@@ -3557,18 +3561,6 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
 	seq_puts(seq, "\nChelsio KTLS Crypto Accelerator Stats\n");
 	seq_printf(seq, "Tx TLS offload refcount: %20u\n",
 		   refcount_read(&adap->chcr_ktls.ktls_refcount));
-	seq_printf(seq, "Tx HW offload contexts added: %20llu\n",
-		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_ctx));
-	seq_printf(seq, "Tx connection created: %20llu\n",
-		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_open));
-	seq_printf(seq, "Tx connection failed: %20llu\n",
-		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_fail));
-	seq_printf(seq, "Tx connection closed: %20llu\n",
-		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_connection_close));
-	seq_printf(seq, "Packets passed for encryption : %20llu\n",
-		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_encrypted_packets));
-	seq_printf(seq, "Bytes passed for encryption : %20llu\n",
-		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_encrypted_bytes));
 	seq_printf(seq, "Tx records send: %20llu\n",
 		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_send_records));
 	seq_printf(seq, "Tx partial start of records: %20llu\n",
@@ -3581,14 +3573,17 @@ static int chcr_stats_show(struct seq_file *seq, void *v)
 		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_complete_pkts));
 	seq_printf(seq, "TX trim pkts : %20llu\n",
 		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_trimmed_pkts));
-	seq_printf(seq, "Tx out of order packets: %20llu\n",
-		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_ooo));
-	seq_printf(seq, "Tx drop pkts before HW offload: %20llu\n",
-		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_skip_no_sync_data));
-	seq_printf(seq, "Tx drop not synced packets: %20llu\n",
-		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_drop_no_sync_data));
-	seq_printf(seq, "Tx drop bypass req: %20llu\n",
-		   atomic64_read(&adap->ch_ktls_stats.ktls_tx_drop_bypass_req));
+	while (i < MAX_NPORTS) {
+		ktls_port = &adap->ch_ktls_stats.ktls_port[i];
+		seq_printf(seq, "Port %d\n", i);
+		seq_printf(seq, "Tx connection created: %20llu\n",
+			   atomic64_read(&ktls_port->ktls_tx_connection_open));
+		seq_printf(seq, "Tx connection failed: %20llu\n",
+			   atomic64_read(&ktls_port->ktls_tx_connection_fail));
+		seq_printf(seq, "Tx connection closed: %20llu\n",
+			   atomic64_read(&ktls_port->ktls_tx_connection_close));
+		i++;
+	}
 #endif
 	return 0;
 }


@@ -117,14 +117,6 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
 	"vlan_insertions ",
 	"gro_packets ",
 	"gro_merged ",
-};
-
-static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
-	"db_drop ",
-	"db_full ",
-	"db_empty ",
-	"write_coal_success ",
-	"write_coal_fail ",
 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
 	"tx_tls_encrypted_packets",
 	"tx_tls_encrypted_bytes ",
@@ -136,6 +128,14 @@ static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
 #endif
 };
 
+static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
+	"db_drop ",
+	"db_full ",
+	"db_empty ",
+	"write_coal_success ",
+	"write_coal_fail ",
+};
+
 static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
 	"-------Loopback----------- ",
 	"octets_ok ",
@@ -257,14 +257,6 @@ struct queue_port_stats {
 	u64 vlan_ins;
 	u64 gro_pkts;
 	u64 gro_merged;
-};
-
-struct adapter_stats {
-	u64 db_drop;
-	u64 db_full;
-	u64 db_empty;
-	u64 wc_success;
-	u64 wc_fail;
 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
 	u64 tx_tls_encrypted_packets;
 	u64 tx_tls_encrypted_bytes;
@@ -276,12 +268,23 @@ struct adapter_stats {
 #endif
 };
 
+struct adapter_stats {
+	u64 db_drop;
+	u64 db_full;
+	u64 db_empty;
+	u64 wc_success;
+	u64 wc_fail;
+};
+
 static void collect_sge_port_stats(const struct adapter *adap,
 				   const struct port_info *p,
 				   struct queue_port_stats *s)
 {
 	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
 	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+	const struct ch_ktls_port_stats_debug *ktls_stats;
+#endif
 	struct sge_eohw_txq *eohw_tx;
 	unsigned int i;
 
@@ -306,6 +309,21 @@ static void collect_sge_port_stats(const struct adapter *adap,
 			s->vlan_ins += eohw_tx->vlan_ins;
 		}
 	}
+#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
+	ktls_stats = &adap->ch_ktls_stats.ktls_port[p->port_id];
+	s->tx_tls_encrypted_packets =
+		atomic64_read(&ktls_stats->ktls_tx_encrypted_packets);
+	s->tx_tls_encrypted_bytes =
+		atomic64_read(&ktls_stats->ktls_tx_encrypted_bytes);
+	s->tx_tls_ctx = atomic64_read(&ktls_stats->ktls_tx_ctx);
+	s->tx_tls_ooo = atomic64_read(&ktls_stats->ktls_tx_ooo);
+	s->tx_tls_skip_no_sync_data =
+		atomic64_read(&ktls_stats->ktls_tx_skip_no_sync_data);
+	s->tx_tls_drop_no_sync_data =
+		atomic64_read(&ktls_stats->ktls_tx_drop_no_sync_data);
+	s->tx_tls_drop_bypass_req =
+		atomic64_read(&ktls_stats->ktls_tx_drop_bypass_req);
+#endif
 }
 
 static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)


@@ -690,8 +690,8 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
 		 * ULD is/are already active, return failure.
 		 */
 		if (cxgb4_uld_in_use(adap)) {
-			dev_warn(adap->pdev_dev,
-				 "ULD connections (tid/stid) active. Can't enable kTLS\n");
+			dev_dbg(adap->pdev_dev,
+				"ULD connections (tid/stid) active. Can't enable kTLS\n");
 			return -EINVAL;
 		}
 		ret = t4_set_params(adap, adap->mbox, adap->pf,
@@ -699,7 +699,7 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
 		if (ret)
 			return ret;
 		refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
-		pr_info("kTLS has been enabled. Restrictions placed on ULD support\n");
+		pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n");
 	} else {
 		/* ktls settings already up, just increment refcount. */
 		refcount_inc(&adap->chcr_ktls.ktls_refcount);
@@ -716,7 +716,7 @@ int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
 				      0, 1, &params, &params);
 		if (ret)
 			return ret;
-		pr_info("kTLS is disabled. Restrictions on ULD support removed\n");
+		pr_debug("kTLS is disabled. Restrictions on ULD support removed\n");
 	}
 }


@@ -44,6 +44,7 @@
 #include "cxgb4.h"
 
 #define MAX_ULD_QSETS 16
+#define MAX_ULD_NPORTS 4
 
 /* CPL message priority levels */
 enum {
@@ -365,17 +366,10 @@ struct cxgb4_virt_res { /* virtualized HW resources */
 };
 
 #if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
-struct ch_ktls_stats_debug {
+struct ch_ktls_port_stats_debug {
 	atomic64_t ktls_tx_connection_open;
 	atomic64_t ktls_tx_connection_fail;
 	atomic64_t ktls_tx_connection_close;
-	atomic64_t ktls_tx_send_records;
-	atomic64_t ktls_tx_end_pkts;
-	atomic64_t ktls_tx_start_pkts;
-	atomic64_t ktls_tx_middle_pkts;
-	atomic64_t ktls_tx_retransmit_pkts;
-	atomic64_t ktls_tx_complete_pkts;
-	atomic64_t ktls_tx_trimmed_pkts;
 	atomic64_t ktls_tx_encrypted_packets;
 	atomic64_t ktls_tx_encrypted_bytes;
 	atomic64_t ktls_tx_ctx;
@@ -384,6 +378,17 @@ struct ch_ktls_stats_debug {
 	atomic64_t ktls_tx_drop_no_sync_data;
 	atomic64_t ktls_tx_drop_bypass_req;
 };
+
+struct ch_ktls_stats_debug {
+	struct ch_ktls_port_stats_debug ktls_port[MAX_ULD_NPORTS];
+	atomic64_t ktls_tx_send_records;
+	atomic64_t ktls_tx_end_pkts;
+	atomic64_t ktls_tx_start_pkts;
+	atomic64_t ktls_tx_middle_pkts;
+	atomic64_t ktls_tx_retransmit_pkts;
+	atomic64_t ktls_tx_complete_pkts;
+	atomic64_t ktls_tx_trimmed_pkts;
+};
 #endif
 
 struct chcr_stats_debug {
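
To make the split above concrete: connection- and traffic-level counters now live in ch_ktls_stats_debug::ktls_port[] and are indexed by port, while record-level counters stay adapter-wide. A hedged usage sketch, assuming adap and pi as used elsewhere in the driver:

	/* sketch: per-port counters are indexed by port_id after the split */
	struct ch_ktls_port_stats_debug *port_stats =
		&adap->ch_ktls_stats.ktls_port[pi->port_id];

	atomic64_inc(&port_stats->ktls_tx_connection_open);      /* per port */
	atomic64_inc(&adap->ch_ktls_stats.ktls_tx_send_records); /* adapter-wide */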


@@ -125,60 +125,6 @@ out:
 	return ret;
 }
 
-static int chcr_ktls_update_connection_state(struct chcr_ktls_info *tx_info,
-					     int new_state)
-{
-	/* This function can be called from both rx (interrupt context) and tx
-	 * queue contexts.
-	 */
-	spin_lock_bh(&tx_info->lock);
-	switch (tx_info->connection_state) {
-	case KTLS_CONN_CLOSED:
-		tx_info->connection_state = new_state;
-		break;
-
-	case KTLS_CONN_ACT_OPEN_REQ:
-		/* only go forward if state is greater than current state. */
-		if (new_state <= tx_info->connection_state)
-			break;
-		/* update to the next state and also initialize TCB */
-		tx_info->connection_state = new_state;
-		fallthrough;
-	case KTLS_CONN_ACT_OPEN_RPL:
-		/* if we are stuck in this state, means tcb init might not
-		 * received by HW, try sending it again.
-		 */
-		if (!chcr_init_tcb_fields(tx_info))
-			tx_info->connection_state = KTLS_CONN_SET_TCB_REQ;
-		break;
-
-	case KTLS_CONN_SET_TCB_REQ:
-		/* only go forward if state is greater than current state. */
-		if (new_state <= tx_info->connection_state)
-			break;
-		/* update to the next state and check if l2t_state is valid */
-		tx_info->connection_state = new_state;
-		fallthrough;
-	case KTLS_CONN_SET_TCB_RPL:
-		/* Check if l2t state is valid, then move to ready state. */
-		if (cxgb4_check_l2t_valid(tx_info->l2te)) {
-			tx_info->connection_state = KTLS_CONN_TX_READY;
-			atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_ctx);
-		}
-		break;
-
-	case KTLS_CONN_TX_READY:
-		/* nothing to be done here */
-		break;
-
-	default:
-		pr_err("unknown KTLS connection state\n");
-		break;
-	}
-	spin_unlock_bh(&tx_info->lock);
-
-	return tx_info->connection_state;
-}
-
 /*
  * chcr_ktls_act_open_req: creates TCB entry for ipv4 connection.
  * @sk - tcp socket.
@@ -298,27 +244,17 @@ static int chcr_setup_connection(struct sock *sk,
 		return -EINVAL;
 
 	tx_info->atid = atid;
-	tx_info->ip_family = sk->sk_family;
 
-	if (sk->sk_family == AF_INET) {
-		tx_info->ip_family = AF_INET;
+	if (tx_info->ip_family == AF_INET) {
 		ret = chcr_ktls_act_open_req(sk, tx_info, atid);
 #if IS_ENABLED(CONFIG_IPV6)
 	} else {
-		if (!sk->sk_ipv6only &&
-		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
-			tx_info->ip_family = AF_INET;
-			ret = chcr_ktls_act_open_req(sk, tx_info, atid);
-		} else {
-			tx_info->ip_family = AF_INET6;
-			ret = cxgb4_clip_get(tx_info->netdev,
-					     (const u32 *)
-					     &sk->sk_v6_rcv_saddr.s6_addr,
-					     1);
-			if (ret)
-				goto out;
-			ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
-		}
+		ret = cxgb4_clip_get(tx_info->netdev, (const u32 *)
+				     &sk->sk_v6_rcv_saddr,
+				     1);
+		if (ret)
+			return ret;
+		ret = chcr_ktls_act_open_req6(sk, tx_info, atid);
 #endif
 	}
@@ -326,16 +262,21 @@ static int chcr_setup_connection(struct sock *sk,
 	 * success, if any other return type clear atid and return that failure.
 	 */
 	if (ret) {
-		if (ret == NET_XMIT_CN)
+		if (ret == NET_XMIT_CN) {
 			ret = 0;
-		else
+		} else {
+#if IS_ENABLED(CONFIG_IPV6)
+			/* clear clip entry */
+			if (tx_info->ip_family == AF_INET6)
+				cxgb4_clip_release(tx_info->netdev,
+						   (const u32 *)
+						   &sk->sk_v6_rcv_saddr,
+						   1);
+#endif
 			cxgb4_free_atid(t, atid);
-		goto out;
+		}
 	}
-
-	/* update the connection state */
-	chcr_ktls_update_connection_state(tx_info, KTLS_CONN_ACT_OPEN_REQ);
-out:
 	return ret;
 }
@@ -396,15 +337,10 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
 	struct chcr_ktls_ofld_ctx_tx *tx_ctx =
 				chcr_get_ktls_tx_context(tls_ctx);
 	struct chcr_ktls_info *tx_info = tx_ctx->chcr_info;
-	struct sock *sk;
+	struct ch_ktls_port_stats_debug *port_stats;
 
 	if (!tx_info)
 		return;
-	sk = tx_info->sk;
-
-	spin_lock(&tx_info->lock);
-	tx_info->connection_state = KTLS_CONN_CLOSED;
-	spin_unlock(&tx_info->lock);
 
 	/* clear l2t entry */
 	if (tx_info->l2te)
@@ -413,8 +349,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
 #if IS_ENABLED(CONFIG_IPV6)
 	/* clear clip entry */
 	if (tx_info->ip_family == AF_INET6)
-		cxgb4_clip_release(netdev,
-				   (const u32 *)&sk->sk_v6_daddr.in6_u.u6_addr8,
+		cxgb4_clip_release(netdev, (const u32 *)
+				   &tx_info->sk->sk_v6_rcv_saddr,
 				   1);
 #endif
 
@@ -426,7 +362,8 @@ static void chcr_ktls_dev_del(struct net_device *netdev,
 				 tx_info->tid, tx_info->ip_family);
 	}
 
-	atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_connection_close);
+	port_stats = &tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
+	atomic64_inc(&port_stats->ktls_tx_connection_close);
 	kvfree(tx_info);
 	tx_ctx->chcr_info = NULL;
 	/* release module refcount */
@@ -448,6 +385,7 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 			     u32 start_offload_tcp_sn)
 {
 	struct tls_context *tls_ctx = tls_get_ctx(sk);
+	struct ch_ktls_port_stats_debug *port_stats;
 	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
 	struct chcr_ktls_info *tx_info;
 	struct dst_entry *dst;
@@ -461,30 +399,23 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 	pi = netdev_priv(netdev);
 	adap = pi->adapter;
+	port_stats = &adap->ch_ktls_stats.ktls_port[pi->port_id];
+	atomic64_inc(&port_stats->ktls_tx_connection_open);
+
 	if (direction == TLS_OFFLOAD_CTX_DIR_RX) {
 		pr_err("not expecting for RX direction\n");
-		ret = -EINVAL;
 		goto out;
 	}
-	if (tx_ctx->chcr_info) {
-		ret = -EINVAL;
+
+	if (tx_ctx->chcr_info)
 		goto out;
-	}
 
 	tx_info = kvzalloc(sizeof(*tx_info), GFP_KERNEL);
-	if (!tx_info) {
-		ret = -ENOMEM;
+	if (!tx_info)
 		goto out;
-	}
-
-	spin_lock_init(&tx_info->lock);
-
-	/* clear connection state */
-	spin_lock(&tx_info->lock);
-	tx_info->connection_state = KTLS_CONN_CLOSED;
-	spin_unlock(&tx_info->lock);
 
 	tx_info->sk = sk;
+	spin_lock_init(&tx_info->lock);
 	/* initialize tid and atid to -1, 0 is a also a valid id. */
 	tx_info->tid = -1;
 	tx_info->atid = -1;
@@ -495,10 +426,12 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 	tx_info->tx_chan = pi->tx_chan;
 	tx_info->smt_idx = pi->smt_idx;
 	tx_info->port_id = pi->port_id;
+	tx_info->prev_ack = 0;
+	tx_info->prev_win = 0;
 
 	tx_info->rx_qid = chcr_get_first_rx_qid(adap);
 	if (unlikely(tx_info->rx_qid < 0))
-		goto out2;
+		goto free_tx_info;
 
 	tx_info->prev_seq = start_offload_tcp_sn;
 	tx_info->tcp_start_seq_number = start_offload_tcp_sn;
@@ -506,18 +439,22 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 	/* save crypto keys */
 	ret = chcr_ktls_save_keys(tx_info, crypto_info, direction);
 	if (ret < 0)
-		goto out2;
+		goto free_tx_info;
 
 	/* get peer ip */
 	if (sk->sk_family == AF_INET) {
 		memcpy(daaddr, &sk->sk_daddr, 4);
+		tx_info->ip_family = AF_INET;
 #if IS_ENABLED(CONFIG_IPV6)
 	} else {
 		if (!sk->sk_ipv6only &&
-		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED)
+		    ipv6_addr_type(&sk->sk_v6_daddr) == IPV6_ADDR_MAPPED) {
 			memcpy(daaddr, &sk->sk_daddr, 4);
-		else
+			tx_info->ip_family = AF_INET;
+		} else {
 			memcpy(daaddr, sk->sk_v6_daddr.in6_u.u6_addr8, 16);
+			tx_info->ip_family = AF_INET6;
+		}
 #endif
 	}
@@ -525,13 +462,13 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 	dst = sk_dst_get(sk);
 	if (!dst) {
 		pr_err("DST entry not found\n");
-		goto out2;
+		goto free_tx_info;
 	}
 
 	n = dst_neigh_lookup(dst, daaddr);
 	if (!n || !n->dev) {
 		pr_err("neighbour not found\n");
 		dst_release(dst);
-		goto out2;
+		goto free_tx_info;
 	}
 
 	tx_info->l2te = cxgb4_l2t_get(adap->l2t, n, n->dev, 0);
@@ -540,31 +477,86 @@ static int chcr_ktls_dev_add(struct net_device *netdev, struct sock *sk,
 	if (!tx_info->l2te) {
 		pr_err("l2t entry not found\n");
-		goto out2;
+		goto free_tx_info;
 	}
 
-	tx_ctx->chcr_info = tx_info;
+	/* Driver shouldn't be removed until any single connection exists */
+	if (!try_module_get(THIS_MODULE))
+		goto free_l2t;
 
+	init_completion(&tx_info->completion);
 	/* create a filter and call cxgb4_l2t_send to send the packet out, which
 	 * will take care of updating l2t entry in hw if not already done.
 	 */
-	ret = chcr_setup_connection(sk, tx_info);
-	if (ret)
-		goto out2;
+	tx_info->open_state = CH_KTLS_OPEN_PENDING;
 
-	/* Driver shouldn't be removed until any single connection exists */
-	if (!try_module_get(THIS_MODULE)) {
-		ret = -EINVAL;
-		goto out2;
+	if (chcr_setup_connection(sk, tx_info))
+		goto put_module;
+
+	/* Wait for reply */
+	wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
+	spin_lock_bh(&tx_info->lock);
+	if (tx_info->open_state) {
+		/* need to wait for hw response, can't free tx_info yet. */
+		if (tx_info->open_state == CH_KTLS_OPEN_PENDING)
+			tx_info->pending_close = true;
+		/* free the lock after the cleanup */
+		goto put_module;
 	}
+	spin_unlock_bh(&tx_info->lock);
 
-	atomic64_inc(&adap->ch_ktls_stats.ktls_tx_connection_open);
+	/* initialize tcb */
+	reinit_completion(&tx_info->completion);
+	/* mark it pending for hw response */
+	tx_info->open_state = CH_KTLS_OPEN_PENDING;
+
+	if (chcr_init_tcb_fields(tx_info))
+		goto free_tid;
+
+	/* Wait for reply */
+	wait_for_completion_timeout(&tx_info->completion, 30 * HZ);
+	spin_lock_bh(&tx_info->lock);
+	if (tx_info->open_state) {
+		/* need to wait for hw response, can't free tx_info yet. */
+		tx_info->pending_close = true;
+		/* free the lock after cleanup */
+		goto free_tid;
+	}
+	spin_unlock_bh(&tx_info->lock);
+
+	if (!cxgb4_check_l2t_valid(tx_info->l2te))
+		goto free_tid;
+
+	atomic64_inc(&port_stats->ktls_tx_ctx);
+	tx_ctx->chcr_info = tx_info;
+
 	return 0;
-out2:
-	kvfree(tx_info);
+
+free_tid:
+	chcr_ktls_mark_tcb_close(tx_info);
+#if IS_ENABLED(CONFIG_IPV6)
+	/* clear clip entry */
+	if (tx_info->ip_family == AF_INET6)
+		cxgb4_clip_release(netdev, (const u32 *)
+				   &sk->sk_v6_rcv_saddr,
+				   1);
+#endif
+	cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+			 tx_info->tid, tx_info->ip_family);
+
+put_module:
+	/* release module refcount */
+	module_put(THIS_MODULE);
+
+free_l2t:
+	cxgb4_l2t_release(tx_info->l2te);
+
+free_tx_info:
+	if (tx_info->pending_close)
+		spin_unlock_bh(&tx_info->lock);
+	else
+		kvfree(tx_info);
+
 out:
-	atomic64_inc(&adap->ch_ktls_stats.ktls_tx_connection_fail);
-	return ret;
+	atomic64_inc(&port_stats->ktls_tx_connection_fail);
+	return -1;
 }
 
 /*
@@ -627,20 +619,39 @@ static int chcr_ktls_cpl_act_open_rpl(struct adapter *adap,
 	tx_info = lookup_atid(t, atid);
 
 	if (!tx_info || tx_info->atid != atid) {
-		pr_err("tx_info or atid is not correct\n");
+		pr_err("%s: incorrect tx_info or atid\n", __func__);
 		return -1;
 	}
 
+	cxgb4_free_atid(t, atid);
+	tx_info->atid = -1;
+
+	spin_lock(&tx_info->lock);
+	/* HW response is very close, finish pending cleanup */
+	if (tx_info->pending_close) {
+		spin_unlock(&tx_info->lock);
+		if (!status) {
+			/* it's a late success, tcb status is established,
+			 * mark it close.
+			 */
+			chcr_ktls_mark_tcb_close(tx_info);
+			cxgb4_remove_tid(&tx_info->adap->tids, tx_info->tx_chan,
+					 tid, tx_info->ip_family);
+		}
+		kvfree(tx_info);
+		return 0;
+	}
+
 	if (!status) {
 		tx_info->tid = tid;
 		cxgb4_insert_tid(t, tx_info, tx_info->tid, tx_info->ip_family);
-
-		cxgb4_free_atid(t, atid);
-		tx_info->atid = -1;
-		/* update the connection state */
-		chcr_ktls_update_connection_state(tx_info,
-						  KTLS_CONN_ACT_OPEN_RPL);
+		tx_info->open_state = CH_KTLS_OPEN_SUCCESS;
+	} else {
+		tx_info->open_state = CH_KTLS_OPEN_FAILURE;
 	}
+	spin_unlock(&tx_info->lock);
+
+	complete(&tx_info->completion);
 	return 0;
 }
@@ -658,12 +669,22 @@ static int chcr_ktls_cpl_set_tcb_rpl(struct adapter *adap, unsigned char *input)
 	t = &adap->tids;
 	tx_info = lookup_tid(t, tid);
+
 	if (!tx_info || tx_info->tid != tid) {
-		pr_err("tx_info or atid is not correct\n");
+		pr_err("%s: incorrect tx_info or tid\n", __func__);
 		return -1;
 	}
-	/* update the connection state */
-	chcr_ktls_update_connection_state(tx_info, KTLS_CONN_SET_TCB_RPL);
+
+	spin_lock(&tx_info->lock);
+	if (tx_info->pending_close) {
+		spin_unlock(&tx_info->lock);
+		kvfree(tx_info);
+		return 0;
+	}
+	tx_info->open_state = false;
+	spin_unlock(&tx_info->lock);
+
+	complete(&tx_info->completion);
 	return 0;
 }
@@ -765,6 +786,7 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
 				   u64 tcp_ack, u64 tcp_win)
 {
 	bool first_wr = ((tx_info->prev_ack == 0) && (tx_info->prev_win == 0));
+	struct ch_ktls_port_stats_debug *port_stats;
 	u32 len, cpl = 0, ndesc, wr_len;
 	struct fw_ulptx_wr *wr;
 	int credits;
@@ -798,12 +820,14 @@ static int chcr_ktls_xmit_tcb_cpls(struct chcr_ktls_info *tx_info,
 	/* reset snd una if it's a re-transmit pkt */
 	if (tcp_seq != tx_info->prev_seq) {
 		/* reset snd_una */
+		port_stats =
+			&tx_info->adap->ch_ktls_stats.ktls_port[tx_info->port_id];
 		pos = chcr_write_cpl_set_tcb_ulp(tx_info, q, tx_info->tid, pos,
 						 TCB_SND_UNA_RAW_W,
 						 TCB_SND_UNA_RAW_V
 						 (TCB_SND_UNA_RAW_M),
 						 TCB_SND_UNA_RAW_V(0), 0);
-		atomic64_inc(&tx_info->adap->ch_ktls_stats.ktls_tx_ooo);
+		atomic64_inc(&port_stats->ktls_tx_ooo);
 		cpl++;
 	}
 	/* update ack */
@@ -1836,6 +1860,7 @@ out:
 /* nic tls TX handler */
 static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 {
+	struct ch_ktls_port_stats_debug *port_stats;
 	struct chcr_ktls_ofld_ctx_tx *tx_ctx;
 	struct ch_ktls_stats_debug *stats;
 	struct tcphdr *th = tcp_hdr(skb);
@@ -1845,7 +1870,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	u32 tls_end_offset, tcp_seq;
 	struct tls_context *tls_ctx;
 	struct sk_buff *local_skb;
-	int new_connection_state;
 	struct sge_eth_txq *q;
 	struct adapter *adap;
 	unsigned long flags;
@@ -1868,15 +1892,6 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (unlikely(!tx_info))
 		goto out;
 
-	/* check the connection state, we don't need to pass new connection
-	 * state, state machine will check and update the new state if it is
-	 * stuck due to responses not received from HW.
-	 * Start the tx handling only if state is KTLS_CONN_TX_READY.
-	 */
-	new_connection_state = chcr_ktls_update_connection_state(tx_info, 0);
-	if (new_connection_state != KTLS_CONN_TX_READY)
-		goto out;
-
 	/* don't touch the original skb, make a new skb to extract each records
 	 * and send them separately.
 	 */
@@ -1887,6 +1902,7 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	adap = tx_info->adap;
 	stats = &adap->ch_ktls_stats;
+	port_stats = &stats->ktls_port[tx_info->port_id];
 	qidx = skb->queue_mapping;
 	q = &adap->sge.ethtxq[qidx + tx_info->first_qset];
@@ -1932,13 +1948,13 @@ static int chcr_ktls_xmit(struct sk_buff *skb, struct net_device *dev)
 	 */
 	if (unlikely(!record)) {
 		spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
-		atomic64_inc(&stats->ktls_tx_drop_no_sync_data);
+		atomic64_inc(&port_stats->ktls_tx_drop_no_sync_data);
 		goto out;
 	}
 
 	if (unlikely(tls_record_is_start_marker(record))) {
 		spin_unlock_irqrestore(&tx_ctx->base.lock, flags);
-		atomic64_inc(&stats->ktls_tx_skip_no_sync_data);
+		atomic64_inc(&port_stats->ktls_tx_skip_no_sync_data);
 		goto out;
 	}
@@ -2009,9 +2025,8 @@ clear_ref:
 	} while (data_len > 0);
 
 	tx_info->prev_seq = ntohl(th->seq) + skb->data_len;
-
-	atomic64_inc(&stats->ktls_tx_encrypted_packets);
-	atomic64_add(skb->data_len, &stats->ktls_tx_encrypted_bytes);
+	atomic64_inc(&port_stats->ktls_tx_encrypted_packets);
+	atomic64_add(skb->data_len, &port_stats->ktls_tx_encrypted_bytes);
 
 	/* tcp finish is set, send a separate tcp msg including all the options
 	 * as well.


@@ -27,22 +27,20 @@
 #define CHCR_KTLS_WR_SIZE (CHCR_PLAIN_TX_DATA_LEN +\
 			   sizeof(struct cpl_tx_sec_pdu))
 
-enum chcr_ktls_conn_state {
-	KTLS_CONN_CLOSED,
-	KTLS_CONN_ACT_OPEN_REQ,
-	KTLS_CONN_ACT_OPEN_RPL,
-	KTLS_CONN_SET_TCB_REQ,
-	KTLS_CONN_SET_TCB_RPL,
-	KTLS_CONN_TX_READY,
+enum ch_ktls_open_state {
+	CH_KTLS_OPEN_SUCCESS = 0,
+	CH_KTLS_OPEN_PENDING = 1,
+	CH_KTLS_OPEN_FAILURE = 2,
 };
 
 struct chcr_ktls_info {
 	struct sock *sk;
-	spinlock_t lock; /* state machine lock */
+	spinlock_t lock; /* lock for pending_close */
 	struct ktls_key_ctx key_ctx;
 	struct adapter *adap;
 	struct l2t_entry *l2te;
 	struct net_device *netdev;
+	struct completion completion;
 	u64 iv;
 	u64 record_no;
 	int tid;
@@ -58,13 +56,14 @@ struct chcr_ktls_info {
 	u32 tcp_start_seq_number;
 	u32 scmd0_short_seqno_numivs;
 	u32 scmd0_short_ivgen_hdrlen;
-	enum chcr_ktls_conn_state connection_state;
 	u16 prev_win;
 	u8 tx_chan;
 	u8 smt_idx;
 	u8 port_id;
 	u8 ip_family;
	u8 first_qset;
+	enum ch_ktls_open_state open_state;
+	bool pending_close;
 };
 
 struct chcr_ktls_ofld_ctx_tx {
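
Finally, the reply-handler half of the handshake sketched after the commit metadata: the CPL handlers record the outcome in open_state and wake the waiter. Again a condensed, hypothetical rendering of chcr_ktls_cpl_act_open_rpl() above — ktls_open_rpl_sketch() is not a real driver function, and the real code also takes tx_info->lock and handles the pending_close cleanup path:

	/* sketch: reply handler records the outcome and wakes the waiter */
	static void ktls_open_rpl_sketch(struct chcr_ktls_info *tx_info, int status)
	{
		tx_info->open_state = status ? CH_KTLS_OPEN_FAILURE
					     : CH_KTLS_OPEN_SUCCESS;
		complete(&tx_info->completion); /* unblocks chcr_ktls_dev_add() */
	}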