staging: lustre: Fix typo in lustre/lnet
Correct spelling typos in comments and debug messages.

Signed-off-by: Masanari Iida <standby24x7@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent: 30224b5e0b
Commit: 2b28432642
@@ -619,7 +619,7 @@ typedef struct lnet_portal {
 	unsigned int		ptl_index;	/* portal ID, reserved */
 	/* flags on this portal: lazy, unique... */
 	unsigned int		ptl_options;
-	/* list of messags which are stealing buffer */
+	/* list of messages which are stealing buffer */
 	struct list_head	ptl_msg_stealing;
 	/* messages blocking for MD */
 	struct list_head	ptl_msg_delayed;
@@ -1441,7 +1441,7 @@ ksocknal_close_conn_locked (ksock_conn_t *conn, int error)
 
 	conn->ksnc_route = NULL;
 
-#if 0           /* irrelevent with only eager routes */
+#if 0           /* irrelevant with only eager routes */
 	/* make route least favourite */
 	list_del (&route->ksnr_list);
 	list_add_tail (&route->ksnr_list, &peer->ksnp_routes);
@@ -1496,7 +1496,7 @@ ksocknal_peer_failed (ksock_peer_t *peer)
 
 	/* There has been a connection failure or comms error; but I'll only
 	 * tell LNET I think the peer is dead if it's to another kernel and
-	 * there are no connections or connection attempts in existance. */
+	 * there are no connections or connection attempts in existence. */
 
 	read_lock(&ksocknal_data.ksnd_global_lock);
 
@@ -2358,7 +2358,7 @@ ksocknal_new_incarnation (void)
 	/* The incarnation number is the time this module loaded and it
 	 * identifies this particular instance of the socknal.  Hopefully
 	 * we won't be able to reboot more frequently than 1MHz for the
-	 * forseeable future :) */
+	 * foreseeable future :) */
 
 	do_gettimeofday(&tv);
 
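Note: the comment in this hunk explains that the incarnation number is simply the module's load time, used to distinguish one instance of the socknal from the next. A minimal sketch of that idea, assuming the legacy do_gettimeofday() interface already used in this hunk; the function name and exact return expression below are illustrative, not quoted from the file:

#include <linux/time.h>	/* struct timeval, do_gettimeofday() (pre-5.0 kernels) */

static __u64
new_incarnation_sketch(void)
{
	struct timeval tv;

	/* Use the load time at microsecond granularity as an identifier
	 * for this instance; it only repeats if the node manages to
	 * reboot more often than 1MHz. */
	do_gettimeofday(&tv);
	return ((__u64)tv.tv_sec) * 1000000 + tv.tv_usec;
}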
@@ -1116,7 +1116,7 @@ ksocknal_process_receive (ksock_conn_t *conn)
 	LASSERT (atomic_read(&conn->ksnc_conn_refcount) > 0);
 
 	/* NB: sched lock NOT held */
-	/* SOCKNAL_RX_LNET_HEADER is here for backward compatability */
+	/* SOCKNAL_RX_LNET_HEADER is here for backward compatibility */
 	LASSERT (conn->ksnc_rx_state == SOCKNAL_RX_KSM_HEADER ||
 		 conn->ksnc_rx_state == SOCKNAL_RX_LNET_PAYLOAD ||
 		 conn->ksnc_rx_state == SOCKNAL_RX_LNET_HEADER ||
@@ -1985,7 +1985,7 @@ ksocknal_connect (ksock_route_t *route)
 		list_splice_init(&peer->ksnp_tx_queue, &zombies);
 	}
 
-#if 0           /* irrelevent with only eager routes */
+#if 0           /* irrelevant with only eager routes */
 	if (!route->ksnr_deleted) {
 		/* make this route least-favourite for re-selection */
 		list_del(&route->ksnr_list);
@@ -218,7 +218,7 @@ ksocknal_queue_tx_zcack_v3(ksock_conn_t *conn,
 	if (tx->tx_msg.ksm_zc_cookies[0] > tx->tx_msg.ksm_zc_cookies[1]) {
 		__u64 tmp = 0;
 
-		/* two seperated cookies: (a+2, a) or (a+1, a) */
+		/* two separated cookies: (a+2, a) or (a+1, a) */
 		LASSERT (tx->tx_msg.ksm_zc_cookies[0] -
 			 tx->tx_msg.ksm_zc_cookies[1] <= 2);
 
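Note: the comment fixed here relies on the ZC-ACK cookie convention that when ksm_zc_cookies[0] > ksm_zc_cookies[1] the pair carries two separate cookies, (a+1, a) or (a+2, a). As an illustration only (this helper is not part of the patch or the file), the invariant the LASSERT enforces can be written as:

#include <linux/types.h>	/* bool, __u64 */

/* Two separated zero-copy ACK cookies: the first is larger and the gap
 * between them is at most 2, i.e. (a+1, a) or (a+2, a). */
static inline bool zc_cookie_pair_is_separated(__u64 c0, __u64 c1)
{
	return c0 > c1 && (c0 - c1) <= 2;
}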
@@ -316,7 +316,7 @@ lnet_msg_attach_md(lnet_msg_t *msg, lnet_libmd_t *md,
 	LASSERT(!msg->msg_routing);
 
 	msg->msg_md = md;
-	if (msg->msg_receiving) { /* commited for receiving */
+	if (msg->msg_receiving) { /* committed for receiving */
 		msg->msg_offset = offset;
 		msg->msg_wanted = mlen;
 	}
@@ -392,7 +392,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
 		 * NB: message is committed for sending, we should return
 		 * on success because LND will finalize this message later.
 		 *
-		 * Also, there is possibility that message is commited for
+		 * Also, there is possibility that message is committed for
 		 * sending and also failed before delivering to LND,
 		 * i.e: ENOMEM, in that case we can't fall through either
 		 * because CPT for sending can be different with CPT for
@@ -414,7 +414,7 @@ lnet_complete_msg_locked(lnet_msg_t *msg, int cpt)
 		 * NB: message is committed for sending, we should return
 		 * on success because LND will finalize this message later.
 		 *
-		 * Also, there is possibility that message is commited for
+		 * Also, there is possibility that message is committed for
 		 * sending and also failed before delivering to LND,
 		 * i.e: ENOMEM, in that case we can't fall through either:
 		 * - The rule is message must decommit for sending first if
@@ -474,14 +474,14 @@ lnet_finalize (lnet_ni_t *ni, lnet_msg_t *msg, int status)
 again:
 	rc = 0;
 	if (!msg->msg_tx_committed && !msg->msg_rx_committed) {
-		/* not commited to network yet */
+		/* not committed to network yet */
		LASSERT(!msg->msg_onactivelist);
 		lnet_msg_free(msg);
 		return;
 	}
 
 	/*
-	 * NB: routed message can be commited for both receiving and sending,
+	 * NB: routed message can be committed for both receiving and sending,
 	 * we should finalize in LIFO order and keep counters correct.
 	 * (finalize sending first then finalize receiving)
 	 */
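Note: the second comment in this hunk states the ordering rule for routed messages, which can be committed for both receiving and sending. A hedged sketch of that LIFO rule follows; the decommit_tx()/decommit_rx() helpers are placeholders for whatever the real finalization path uses, not functions from this file:

void decommit_tx(lnet_msg_t *msg, int status);	/* placeholder declaration */
void decommit_rx(lnet_msg_t *msg, int status);	/* placeholder declaration */

static void
decommit_in_lifo_order(lnet_msg_t *msg, int status)
{
	/* A routed message commits for receiving first and for sending
	 * second, so undo the commits in reverse (LIFO) order: sending
	 * first, then receiving, keeping the per-CPT counters balanced. */
	if (msg->msg_tx_committed)
		decommit_tx(msg, status);

	if (msg->msg_rx_committed)
		decommit_rx(msg, status);
}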
@@ -361,7 +361,7 @@ brw_server_rpc_done (srpc_server_rpc_t *rpc)
 			blk->bk_sink ? "from" : "to",
 			libcfs_id2str(rpc->srpc_peer), rpc->srpc_status);
 	else
-		CDEBUG (D_NET, "Transfered %d pages bulk data %s %s\n",
+		CDEBUG (D_NET, "Transferred %d pages bulk data %s %s\n",
 			blk->bk_niov, blk->bk_sink ? "from" : "to",
 			libcfs_id2str(rpc->srpc_peer));
 
@@ -1356,7 +1356,7 @@ lstcon_rpc_cleanup_wait(void)
 
 	lst_wait_until((atomic_read(&console_session.ses_rpc_counter) == 0),
 		       console_session.ses_rpc_lock,
-		       "Network is not accessable or target is down, "
+		       "Network is not accessible or target is down, "
 		       "waiting for %d console RPCs to being recycled\n",
 		       atomic_read(&console_session.ses_rpc_counter));
 
@@ -1117,7 +1117,7 @@ srpc_del_client_rpc_timer (srpc_client_rpc_t *rpc)
 	if (rpc->crpc_timeout == 0)
 		return;
 
-	/* timer sucessfully defused */
+	/* timer successfully defused */
 	if (stt_del_timer(&rpc->crpc_timer))
 		return;
 