Pull Request for 3.13
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.11 (GNU/Linux)

iQIcBAABAgAGBQJSZW0dAAoJEEajxTw9cn4HMp8P/Ap2hwIHFfbdfshktNSLy6Xi
xXq46qFh5IaiKE2+t3P4pDdr9UvaXD92ijVWSO0eZr2Y3cQ896/v7bakQt493BFB
sRylmto7pzTGfA+tY5X498kBYKIHlKYdcY6sF89cOmMje8IMtwdlg4vWPbGXxFz5
z7VvIJkgTMDNOtJre93N0oSAfQf5iH9X7fnLGSsYntVTXoBrjzWHPq1547lhI7uR
Zt1yTP5kvo3lvpLZuDB2+sly+Q7qsOJIN4teYIM0+gnIKXFqd2GGGw+MaFnx/jcu
JIJuXkcDNxThLoT311IHPedWKt35liAI2aIy6z5uVeX/aGarcji0l2iDbJ7xswoL
SMu6gQwdnNIZRFPupjWQCGd41wph1woo0W69a90Q57Sz9Yn8/owHhbApRHp2eCKx
mz/AJ6wL4+Nq0oniRsxjFf8BJaqzbWxsp3hAcXXpsuliAH00D5Px0go/B80L1y32
R3NJ8earBo/cLaElmEdKT8fVuyiLdT4ckOVGF+MKuoXr+XfsI/p9o2wc/JZ1gTTH
UfU5ee0d2BCj6tgh2QVsLXra9aHbXUa39YREUlO4QWG5ze0d+P/LMDCSlO+lxMk7
lUQRgQ91UQYpBOrSGzPcvSMmqg7vD+d4+PkTdIbaZtQUdQ1HLL0ARk6hFd14Cm2A
3CBlGvw/pPlk7+WQP92q
=6dOX
-----END PGP SIGNATURE-----

Merge tag 'fcoe-3.13' into for-linus

Pull Request for 3.13 for FCOE tree.

Signed-off-by: James Bottomley <JBottomley@Parallels.com>
This commit is contained in:
Commit 323f6226a8
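Most of the driver churn in this pull is a mechanical switch from compare_ether_addr() to ether_addr_equal() in fcoe, libfcoe, fnic and bnx2fc. The two helpers have opposite senses: compare_ether_addr() returns 0 on a match (memcmp style), while ether_addr_equal() returns true on a match, so !compare_ether_addr(a, b) becomes ether_addr_equal(a, b) and a bare compare_ether_addr(a, b) test becomes !ether_addr_equal(a, b). A minimal userspace sketch of that mapping, using simplified stand-ins for the helpers declared in <linux/etherdevice.h> (not the kernel implementations):

/* Illustration only: simplified userspace stand-ins for the kernel helpers. */
#include <assert.h>
#include <stdbool.h>
#include <string.h>

/* Old helper: memcmp-style, returns 0 when the two MAC addresses match. */
static int compare_ether_addr(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, 6) != 0;
}

/* New helper: boolean, returns true when the two MAC addresses match. */
static bool ether_addr_equal(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, 6) == 0;
}

int main(void)
{
	unsigned char mac1[6] = { 0x0e, 0xfc, 0x00, 0x01, 0x02, 0x03 };
	unsigned char mac2[6] = { 0x0e, 0xfc, 0x00, 0x01, 0x02, 0x03 };

	/* The two idioms used throughout this series are equivalent: */
	assert((compare_ether_addr(mac1, mac2) == 0) == ether_addr_equal(mac1, mac2));
	assert((compare_ether_addr(mac1, mac2) != 0) == !ether_addr_equal(mac1, mac2));
	return 0;
}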
@@ -542,8 +542,7 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 vn_port = fc_vport_id_lookup(lport, ntoh24(fh->fh_d_id));
 if (vn_port) {
 port = lport_priv(vn_port);
-if (compare_ether_addr(port->data_src_addr, dest_mac)
-!= 0) {
+if (!ether_addr_equal(port->data_src_addr, dest_mac)) {
 BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
 put_cpu();
 kfree_skb(skb);
@@ -1381,6 +1380,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
 return NULL;
 }
 ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ctlr->cdev = ctlr_dev;
 interface = fcoe_ctlr_priv(ctlr);
 dev_hold(netdev);
 kref_init(&interface->kref);
@@ -408,6 +408,7 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
 }
 
 ctlr = fcoe_ctlr_device_priv(ctlr_dev);
+ctlr->cdev = ctlr_dev;
 fcoe = fcoe_ctlr_priv(ctlr);
 
 dev_hold(netdev);
@@ -1440,22 +1441,28 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 ctlr = fcoe_to_ctlr(fcoe);
 lport = ctlr->lp;
 if (unlikely(!lport)) {
-FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
+FCOE_NETDEV_DBG(netdev, "Cannot find hba structure\n");
 goto err2;
 }
 if (!lport->link_up)
 goto err2;
-FCOE_NETDEV_DBG(netdev, "skb_info: len:%d data_len:%d head:%p "
-"data:%p tail:%p end:%p sum:%d dev:%s",
+
+FCOE_NETDEV_DBG(netdev,
+"skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n",
 skb->len, skb->data_len, skb->head, skb->data,
 skb_tail_pointer(skb), skb_end_pointer(skb),
 skb->csum, skb->dev ? skb->dev->name : "<NULL>");
+
+
 skb = skb_share_check(skb, GFP_ATOMIC);
+
 if (skb == NULL)
 return NET_RX_DROP;
+
 eh = eth_hdr(skb);
+
 if (is_fip_mode(ctlr) &&
-compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
+!ether_addr_equal(eh->h_source, ctlr->dest_addr)) {
 FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
 eh->h_source);
 goto err;
@@ -1540,13 +1547,13 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
 wake_up_process(fps->thread);
 spin_unlock(&fps->fcoe_rx_list.lock);
 
-return 0;
+return NET_RX_SUCCESS;
 err:
 per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++;
 put_cpu();
 err2:
 kfree_skb(skb);
-return -1;
+return NET_RX_DROP;
 }
 
 /**
@@ -1788,13 +1795,13 @@ static void fcoe_recv_frame(struct sk_buff *skb)
 lport = fr->fr_dev;
 if (unlikely(!lport)) {
 if (skb->destructor != fcoe_percpu_flush_done)
-FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
+FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb\n");
 kfree_skb(skb);
 return;
 }
 
-FCOE_NETDEV_DBG(skb->dev, "skb_info: len:%d data_len:%d "
-"head:%p data:%p tail:%p end:%p sum:%d dev:%s",
+FCOE_NETDEV_DBG(skb->dev,
+"skb_info: len:%d data_len:%d head:%p data:%p tail:%p end:%p sum:%d dev:%s\n",
 skb->len, skb->data_len,
 skb->head, skb->data, skb_tail_pointer(skb),
 skb_end_pointer(skb), skb->csum,
@@ -160,30 +160,50 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
 }
 EXPORT_SYMBOL(fcoe_ctlr_init);
 
 /**
 * fcoe_sysfs_fcf_add() - Add a fcoe_fcf{,_device} to a fcoe_ctlr{,_device}
 * @new: The newly discovered FCF
 *
 * Called with fip->ctlr_mutex held
 */
 static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
 {
 struct fcoe_ctlr *fip = new->fip;
-struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
-struct fcoe_fcf_device temp, *fcf_dev;
-int rc = 0;
+struct fcoe_ctlr_device *ctlr_dev;
+struct fcoe_fcf_device *temp, *fcf_dev;
+int rc = -ENOMEM;
 
 LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
 new->fabric_name, new->fcf_mac);
 
+temp = kzalloc(sizeof(*temp), GFP_KERNEL);
+if (!temp)
+goto out;
+
+temp->fabric_name = new->fabric_name;
+temp->switch_name = new->switch_name;
+temp->fc_map = new->fc_map;
+temp->vfid = new->vfid;
+memcpy(temp->mac, new->fcf_mac, ETH_ALEN);
+temp->priority = new->pri;
+temp->fka_period = new->fka_period;
+temp->selected = 0; /* default to unselected */
+
+/*
+* If ctlr_dev doesn't exist then it means we're a libfcoe user
+* who doesn't use fcoe_syfs and didn't allocate a fcoe_ctlr_device.
+* fnic would be an example of a driver with this behavior. In this
+* case we want to add the fcoe_fcf to the fcoe_ctlr list, but we
+* don't want to make sysfs changes.
+*/
+
+ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+if (ctlr_dev) {
 mutex_lock(&ctlr_dev->lock);
-
-temp.fabric_name = new->fabric_name;
-temp.switch_name = new->switch_name;
-temp.fc_map = new->fc_map;
-temp.vfid = new->vfid;
-memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
-temp.priority = new->pri;
-temp.fka_period = new->fka_period;
-temp.selected = 0; /* default to unselected */
-
-fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
+fcf_dev = fcoe_fcf_device_add(ctlr_dev, temp);
 if (unlikely(!fcf_dev)) {
 rc = -ENOMEM;
+mutex_unlock(&ctlr_dev->lock);
 goto out;
 }
 
@@ -201,33 +221,52 @@ static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
 
 fcf_dev->priv = new;
 new->fcf_dev = fcf_dev;
+mutex_unlock(&ctlr_dev->lock);
+}
 
 list_add(&new->list, &fip->fcfs);
 fip->fcf_count++;
+rc = 0;
 
 out:
-mutex_unlock(&ctlr_dev->lock);
+kfree(temp);
 return rc;
 }
 
 /**
 * fcoe_sysfs_fcf_del() - Remove a fcoe_fcf{,_device} to a fcoe_ctlr{,_device}
 * @new: The FCF to be removed
 *
 * Called with fip->ctlr_mutex held
 */
 static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
 {
 struct fcoe_ctlr *fip = new->fip;
-struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
+struct fcoe_ctlr_device *cdev;
 struct fcoe_fcf_device *fcf_dev;
 
 list_del(&new->list);
 fip->fcf_count--;
 
-mutex_lock(&ctlr_dev->lock);
-
+/*
+* If ctlr_dev doesn't exist then it means we're a libfcoe user
+* who doesn't use fcoe_syfs and didn't allocate a fcoe_ctlr_device
+* or a fcoe_fcf_device.
+*
+* fnic would be an example of a driver with this behavior. In this
+* case we want to remove the fcoe_fcf from the fcoe_ctlr list (above),
+* but we don't want to make sysfs changes.
+*/
+cdev = fcoe_ctlr_to_ctlr_dev(fip);
+if (cdev) {
+mutex_lock(&cdev->lock);
 fcf_dev = fcoe_fcf_to_fcf_dev(new);
 WARN_ON(!fcf_dev);
 new->fcf_dev = NULL;
 fcoe_fcf_device_delete(fcf_dev);
 kfree(new);
 
-mutex_unlock(&ctlr_dev->lock);
+mutex_unlock(&cdev->lock);
 }
 }
 
 /**
@@ -300,7 +339,7 @@ static void fcoe_ctlr_announce(struct fcoe_ctlr *fip)
 spin_unlock_bh(&fip->ctlr_lock);
 sel = fip->sel_fcf;
 
-if (sel && !compare_ether_addr(sel->fcf_mac, fip->dest_addr))
+if (sel && ether_addr_equal(sel->fcf_mac, fip->dest_addr))
 goto unlock;
 if (!is_zero_ether_addr(fip->dest_addr)) {
 printk(KERN_NOTICE "libfcoe: host%d: "
@@ -1000,7 +1039,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 if (fcf->switch_name == new.switch_name &&
 fcf->fabric_name == new.fabric_name &&
 fcf->fc_map == new.fc_map &&
-compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
+ether_addr_equal(fcf->fcf_mac, new.fcf_mac)) {
 found = 1;
 break;
 }
@@ -1340,7 +1379,7 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 mp = (struct fip_mac_desc *)desc;
 if (dlen < sizeof(*mp))
 goto err;
-if (compare_ether_addr(mp->fd_mac, fcf->fcf_mac))
+if (!ether_addr_equal(mp->fd_mac, fcf->fcf_mac))
 goto err;
 desc_mask &= ~BIT(FIP_DT_MAC);
 break;
@@ -1418,8 +1457,8 @@ static void fcoe_ctlr_recv_clr_vlink(struct fcoe_ctlr *fip,
 * 'port_id' is already validated, check MAC address and
 * wwpn
 */
-if (compare_ether_addr(fip->get_src_addr(vn_port),
-vp->fd_mac) != 0 ||
+if (!ether_addr_equal(fip->get_src_addr(vn_port),
+vp->fd_mac) ||
 get_unaligned_be64(&vp->fd_wwpn) !=
 vn_port->wwpn)
 continue;
@@ -1453,6 +1492,9 @@ err:
 */
 void fcoe_ctlr_recv(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
+skb = skb_share_check(skb, GFP_ATOMIC);
+if (!skb)
+return;
 skb_queue_tail(&fip->fip_recv_list, skb);
 schedule_work(&fip->recv_work);
 }
@@ -1479,12 +1521,12 @@ static int fcoe_ctlr_recv_handler(struct fcoe_ctlr *fip, struct sk_buff *skb)
 goto drop;
 eh = eth_hdr(skb);
 if (fip->mode == FIP_MODE_VN2VN) {
-if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) &&
-compare_ether_addr(eh->h_dest, fcoe_all_vn2vn) &&
-compare_ether_addr(eh->h_dest, fcoe_all_p2p))
+if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) &&
+!ether_addr_equal(eh->h_dest, fcoe_all_vn2vn) &&
+!ether_addr_equal(eh->h_dest, fcoe_all_p2p))
 goto drop;
-} else if (compare_ether_addr(eh->h_dest, fip->ctl_src_addr) &&
-compare_ether_addr(eh->h_dest, fcoe_all_enode))
+} else if (!ether_addr_equal(eh->h_dest, fip->ctl_src_addr) &&
+!ether_addr_equal(eh->h_dest, fcoe_all_enode))
 goto drop;
 fiph = (struct fip_header *)skb->data;
 op = ntohs(fiph->fip_op);
@@ -1856,7 +1898,7 @@ int fcoe_ctlr_recv_flogi(struct fcoe_ctlr *fip, struct fc_lport *lport,
 * address_mode flag to use FC_OUI-based Ethernet DA.
 * Otherwise we use the FCoE gateway addr
 */
-if (!compare_ether_addr(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
+if (ether_addr_equal(sa, (u8[6])FC_FCOE_FLOGI_MAC)) {
 fcoe_ctlr_map_dest(fip);
 } else {
 memcpy(fip->dest_addr, sa, ETH_ALEN);
@@ -2825,7 +2867,7 @@ unlock:
 * disabled, so that should ensure that this routine is only called
 * when nothing is happening.
 */
-void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip,
+static void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip,
 enum fip_state fip_mode)
 {
 void *priv;
@@ -300,29 +300,29 @@ static ssize_t store_ctlr_mode(struct device *dev,
 
 switch (ctlr->enabled) {
 case FCOE_CTLR_ENABLED:
-LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.");
+LIBFCOE_SYSFS_DBG(ctlr, "Cannot change mode when enabled.\n");
 return -EBUSY;
 case FCOE_CTLR_DISABLED:
 if (!ctlr->f->set_fcoe_ctlr_mode) {
 LIBFCOE_SYSFS_DBG(ctlr,
-"Mode change not supported by LLD.");
+"Mode change not supported by LLD.\n");
 return -ENOTSUPP;
 }
 
 ctlr->mode = fcoe_parse_mode(mode);
 if (ctlr->mode == FIP_CONN_TYPE_UNKNOWN) {
-LIBFCOE_SYSFS_DBG(ctlr,
-"Unknown mode %s provided.", buf);
+LIBFCOE_SYSFS_DBG(ctlr, "Unknown mode %s provided.\n",
+buf);
 return -EINVAL;
 }
 
 ctlr->f->set_fcoe_ctlr_mode(ctlr);
-LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.", buf);
+LIBFCOE_SYSFS_DBG(ctlr, "Mode changed to %s.\n", buf);
 
 return count;
 case FCOE_CTLR_UNUSED:
 default:
-LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.");
+LIBFCOE_SYSFS_DBG(ctlr, "Mode change not supported.\n");
 return -ENOTSUPP;
 };
 }
@@ -653,7 +653,7 @@ static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
 if (new->switch_name == old->switch_name &&
 new->fabric_name == old->fabric_name &&
 new->fc_map == old->fc_map &&
-compare_ether_addr(new->mac, old->mac) == 0)
+ether_addr_equal(new->mac, old->mac))
 return 1;
 return 0;
 }
@@ -658,13 +658,13 @@ void fnic_update_mac_locked(struct fnic *fnic, u8 *new)
 
 if (is_zero_ether_addr(new))
 new = ctl;
-if (!compare_ether_addr(data, new))
+if (ether_addr_equal(data, new))
 return;
 FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host, "update_mac %pM\n", new);
-if (!is_zero_ether_addr(data) && compare_ether_addr(data, ctl))
+if (!is_zero_ether_addr(data) && !ether_addr_equal(data, ctl))
 vnic_dev_del_addr(fnic->vdev, data);
 memcpy(data, new, ETH_ALEN);
-if (compare_ether_addr(new, ctl))
+if (!ether_addr_equal(new, ctl))
 vnic_dev_add_addr(fnic->vdev, new);
 }
 
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/export.h>
+#include <linux/log2.h>
 
 #include <scsi/fc/fc_fc2.h>
 
@@ -303,10 +304,7 @@ static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
 fr_eof(fp) = FC_EOF_N;
 }
 
-/*
-* Initialize remainig fh fields
-* from fc_fill_fc_hdr
-*/
+/* Initialize remaining fh fields from fc_fill_fc_hdr */
 fh->fh_ox_id = htons(ep->oxid);
 fh->fh_rx_id = htons(ep->rxid);
 fh->fh_seq_id = ep->seq.id;
@@ -362,9 +360,10 @@ static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
 
 FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);
 
-if (queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
-msecs_to_jiffies(timer_msec)))
 fc_exch_hold(ep); /* hold for timer */
+if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
+msecs_to_jiffies(timer_msec)))
+fc_exch_release(ep);
 }
 
 /**
@@ -382,6 +381,8 @@ static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
 /**
 * fc_exch_done_locked() - Complete an exchange with the exchange lock held
 * @ep: The exchange that is complete
+*
+* Note: May sleep if invoked from outside a response handler.
 */
 static int fc_exch_done_locked(struct fc_exch *ep)
 {
@@ -393,7 +394,6 @@ static int fc_exch_done_locked(struct fc_exch *ep)
 * ep, and in that case we only clear the resp and set it as
 * complete, so it can be reused by the timer to send the rrq.
 */
-ep->resp = NULL;
 if (ep->state & FC_EX_DONE)
 return rc;
 ep->esb_stat |= ESB_ST_COMPLETE;
@@ -468,11 +468,17 @@ static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
 {
 struct fc_exch *ep;
 struct fc_frame_header *fh = fc_frame_header_get(fp);
-int error;
+int error = -ENXIO;
 u32 f_ctl;
 u8 fh_type = fh->fh_type;
 
 ep = fc_seq_exch(sp);
+
+if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
+fc_frame_free(fp);
+goto out;
+}
+
 WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));
 
 f_ctl = ntoh24(fh->fh_f_ctl);
@@ -515,6 +521,9 @@ out:
 * @lport: The local port that the exchange will be sent on
 * @sp: The sequence to be sent
 * @fp: The frame to be sent on the exchange
+*
+* Note: The frame will be freed either by a direct call to fc_frame_free(fp)
+* or indirectly by calling libfc_function_template.frame_send().
 */
 static int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp,
 struct fc_frame *fp)
@@ -581,6 +590,8 @@ static struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
 
 /*
 * Set the response handler for the exchange associated with a sequence.
+*
+* Note: May sleep if invoked from outside a response handler.
 */
 static void fc_seq_set_resp(struct fc_seq *sp,
 void (*resp)(struct fc_seq *, struct fc_frame *,
@@ -588,8 +599,18 @@ static void fc_seq_set_resp(struct fc_seq *sp,
 void *arg)
 {
 struct fc_exch *ep = fc_seq_exch(sp);
+DEFINE_WAIT(wait);
 
 spin_lock_bh(&ep->ex_lock);
+while (ep->resp_active && ep->resp_task != current) {
+prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
+spin_unlock_bh(&ep->ex_lock);
+
+schedule();
+
+spin_lock_bh(&ep->ex_lock);
+}
+finish_wait(&ep->resp_wq, &wait);
 ep->resp = resp;
 ep->arg = arg;
 spin_unlock_bh(&ep->ex_lock);
@@ -622,27 +643,31 @@ static int fc_exch_abort_locked(struct fc_exch *ep,
 if (!sp)
 return -ENOMEM;
 
-ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
 if (timer_msec)
 fc_exch_timer_set_locked(ep, timer_msec);
 
-/*
-* If not logged into the fabric, don't send ABTS but leave
-* sequence active until next timeout.
-*/
-if (!ep->sid)
-return 0;
-
+if (ep->sid) {
 /*
 * Send an abort for the sequence that timed out.
 */
 fp = fc_frame_alloc(ep->lp, 0);
 if (fp) {
+ep->esb_stat |= ESB_ST_SEQ_INIT;
 fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
-FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
+FC_TYPE_BLS, FC_FC_END_SEQ |
+FC_FC_SEQ_INIT, 0);
 error = fc_seq_send_locked(ep->lp, sp, fp);
-} else
+} else {
 error = -ENOBUFS;
+}
+} else {
+/*
+* If not logged into the fabric, don't send ABTS but leave
+* sequence active until next timeout.
+*/
+error = 0;
+}
+ep->esb_stat |= ESB_ST_ABNORMAL;
 return error;
 }
 
@@ -668,6 +693,61 @@ static int fc_seq_exch_abort(const struct fc_seq *req_sp,
 return error;
 }
 
+/**
+* fc_invoke_resp() - invoke ep->resp()
+*
+* Notes:
+* It is assumed that after initialization finished (this means the
+* first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are
+* modified only via fc_seq_set_resp(). This guarantees that none of these
+* two variables changes if ep->resp_active > 0.
+*
+* If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when
+* this function is invoked, the first spin_lock_bh() call in this function
+* will wait until fc_seq_set_resp() has finished modifying these variables.
+*
+* Since fc_exch_done() invokes fc_seq_set_resp() it is guaranteed that that
+* ep->resp() won't be invoked after fc_exch_done() has returned.
+*
+* The response handler itself may invoke fc_exch_done(), which will clear the
+* ep->resp pointer.
+*
+* Return value:
+* Returns true if and only if ep->resp has been invoked.
+*/
+static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
+struct fc_frame *fp)
+{
+void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
+void *arg;
+bool res = false;
+
+spin_lock_bh(&ep->ex_lock);
+ep->resp_active++;
+if (ep->resp_task != current)
+ep->resp_task = !ep->resp_task ? current : NULL;
+resp = ep->resp;
+arg = ep->arg;
+spin_unlock_bh(&ep->ex_lock);
+
+if (resp) {
+resp(sp, fp, arg);
+res = true;
+} else if (!IS_ERR(fp)) {
+fc_frame_free(fp);
+}
+
+spin_lock_bh(&ep->ex_lock);
+if (--ep->resp_active == 0)
+ep->resp_task = NULL;
+spin_unlock_bh(&ep->ex_lock);
+
+if (ep->resp_active == 0)
+wake_up(&ep->resp_wq);
+
+return res;
+}
+
 /**
 * fc_exch_timeout() - Handle exchange timer expiration
 * @work: The work_struct identifying the exchange that timed out
@@ -677,8 +757,6 @@ static void fc_exch_timeout(struct work_struct *work)
 struct fc_exch *ep = container_of(work, struct fc_exch,
 timeout_work.work);
 struct fc_seq *sp = &ep->seq;
-void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
-void *arg;
 u32 e_stat;
 int rc = 1;
 
@@ -696,16 +774,13 @@ static void fc_exch_timeout(struct work_struct *work)
 fc_exch_rrq(ep);
 goto done;
 } else {
-resp = ep->resp;
-arg = ep->arg;
-ep->resp = NULL;
 if (e_stat & ESB_ST_ABNORMAL)
 rc = fc_exch_done_locked(ep);
 spin_unlock_bh(&ep->ex_lock);
 if (!rc)
 fc_exch_delete(ep);
-if (resp)
-resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
+fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
+fc_seq_set_resp(sp, NULL, ep->arg);
 fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
 goto done;
 }
@@ -792,6 +867,8 @@ hit:
 ep->f_ctl = FC_FC_FIRST_SEQ; /* next seq is first seq */
 ep->rxid = FC_XID_UNKNOWN;
 ep->class = mp->class;
+ep->resp_active = 0;
+init_waitqueue_head(&ep->resp_wq);
 INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
 out:
 return ep;
@@ -838,8 +915,10 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
 pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
 spin_lock_bh(&pool->lock);
 ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
-if (ep && ep->xid == xid)
+if (ep) {
+WARN_ON(ep->xid != xid);
 fc_exch_hold(ep);
+}
 spin_unlock_bh(&pool->lock);
 }
 return ep;
@@ -850,6 +929,8 @@ static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
 * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
 * the memory allocated for the related objects may be freed.
 * @sp: The sequence that has completed
+*
+* Note: May sleep if invoked from outside a response handler.
 */
 static void fc_exch_done(struct fc_seq *sp)
 {
@@ -859,6 +940,8 @@ static void fc_exch_done(struct fc_seq *sp)
 spin_lock_bh(&ep->ex_lock);
 rc = fc_exch_done_locked(ep);
 spin_unlock_bh(&ep->ex_lock);
+
+fc_seq_set_resp(sp, NULL, ep->arg);
 if (!rc)
 fc_exch_delete(ep);
 }
@@ -987,6 +1070,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
 }
 }
 
+spin_lock_bh(&ep->ex_lock);
 /*
 * At this point, we have the exchange held.
 * Find or create the sequence.
@@ -1014,11 +1098,11 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
 * sending RSP, hence write request on other
 * end never finishes.
 */
-spin_lock_bh(&ep->ex_lock);
 sp->ssb_stat |= SSB_ST_RESP;
 sp->id = fh->fh_seq_id;
-spin_unlock_bh(&ep->ex_lock);
 } else {
+spin_unlock_bh(&ep->ex_lock);
+
 /* sequence/exch should exist */
 reject = FC_RJT_SEQ_ID;
 goto rel;
@@ -1029,6 +1113,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
 
 if (f_ctl & FC_FC_SEQ_INIT)
 ep->esb_stat |= ESB_ST_SEQ_INIT;
+spin_unlock_bh(&ep->ex_lock);
 
 fr_seq(fp) = sp;
 out:
@@ -1291,21 +1376,23 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
 
 if (!ep)
 goto reject;
 
-fp = fc_frame_alloc(ep->lp, sizeof(*ap));
-if (!fp)
-goto free;
-
 spin_lock_bh(&ep->ex_lock);
 if (ep->esb_stat & ESB_ST_COMPLETE) {
 spin_unlock_bh(&ep->ex_lock);
+
-fc_frame_free(fp);
 goto reject;
 }
-if (!(ep->esb_stat & ESB_ST_REC_QUAL))
+if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
+ep->esb_stat |= ESB_ST_REC_QUAL;
 fc_exch_hold(ep); /* hold for REC_QUAL */
+}
-ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
-fc_exch_timer_set_locked(ep, ep->r_a_tov);
-
+fp = fc_frame_alloc(ep->lp, sizeof(*ap));
+if (!fp) {
+spin_unlock_bh(&ep->ex_lock);
+goto free;
+}
+fc_exch_timer_set_locked(ep, ep->r_a_tov);
 fh = fc_frame_header_get(fp);
 ap = fc_frame_payload_get(fp, sizeof(*ap));
 memset(ap, 0, sizeof(*ap));
@@ -1319,14 +1406,16 @@ static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
 }
 sp = fc_seq_start_next_locked(sp);
 fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+ep->esb_stat |= ESB_ST_ABNORMAL;
 spin_unlock_bh(&ep->ex_lock);
+
+free:
 fc_frame_free(rx_fp);
 return;
 
 reject:
 fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
-free:
-fc_frame_free(rx_fp);
+goto free;
 }
 
 /**
@@ -1416,9 +1505,7 @@ static void fc_exch_recv_req(struct fc_lport *lport, struct fc_exch_mgr *mp,
 * If new exch resp handler is valid then call that
 * first.
 */
-if (ep->resp)
-ep->resp(sp, fp, ep->arg);
-else
+if (!fc_invoke_resp(ep, sp, fp))
 lport->tt.lport_recv(lport, fp);
 fc_exch_release(ep); /* release from lookup */
 } else {
@@ -1442,8 +1529,6 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 struct fc_exch *ep;
 enum fc_sof sof;
 u32 f_ctl;
-void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
-void *ex_resp_arg;
 int rc;
 
 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
@@ -1478,19 +1563,19 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 
 f_ctl = ntoh24(fh->fh_f_ctl);
 fr_seq(fp) = sp;
+
+spin_lock_bh(&ep->ex_lock);
 if (f_ctl & FC_FC_SEQ_INIT)
 ep->esb_stat |= ESB_ST_SEQ_INIT;
+spin_unlock_bh(&ep->ex_lock);
 
 if (fc_sof_needs_ack(sof))
 fc_seq_send_ack(sp, fp);
-resp = ep->resp;
-ex_resp_arg = ep->arg;
 
 if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
 (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
 (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
 spin_lock_bh(&ep->ex_lock);
-resp = ep->resp;
 rc = fc_exch_done_locked(ep);
 WARN_ON(fc_seq_exch(sp) != ep);
 spin_unlock_bh(&ep->ex_lock);
@@ -1511,10 +1596,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 * If new exch resp handler is valid then call that
 * first.
 */
-if (resp)
-resp(sp, fp, ex_resp_arg);
-else
-fc_frame_free(fp);
+fc_invoke_resp(ep, sp, fp);
+
 fc_exch_release(ep);
 return;
 rel:
@@ -1553,8 +1636,6 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
 */
 static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
 {
-void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
-void *ex_resp_arg;
 struct fc_frame_header *fh;
 struct fc_ba_acc *ap;
 struct fc_seq *sp;
@@ -1599,9 +1680,6 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
 break;
 }
 
-resp = ep->resp;
-ex_resp_arg = ep->arg;
-
 /* do we need to do some other checks here. Can we reuse more of
 * fc_exch_recv_seq_resp
 */
@@ -1613,17 +1691,14 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
 ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
 rc = fc_exch_done_locked(ep);
 spin_unlock_bh(&ep->ex_lock);
 
+fc_exch_hold(ep);
 if (!rc)
 fc_exch_delete(ep);
 
-if (resp)
-resp(sp, fp, ex_resp_arg);
-else
-fc_frame_free(fp);
-
+fc_invoke_resp(ep, sp, fp);
 if (has_rec)
 fc_exch_timer_set(ep, ep->r_a_tov);
-
+fc_exch_release(ep);
 }
 
 /**
@@ -1662,7 +1737,7 @@ static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
 break;
 default:
 if (ep)
-FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
+FC_EXCH_DBG(ep, "BLS rctl %x - %s received\n",
 fh->fh_r_ctl,
 fc_exch_rctl_name(fh->fh_r_ctl));
 break;
@@ -1745,32 +1820,33 @@ static void fc_seq_ls_rjt(struct fc_frame *rx_fp, enum fc_els_rjt_reason reason,
 /**
 * fc_exch_reset() - Reset an exchange
 * @ep: The exchange to be reset
+*
+* Note: May sleep if invoked from outside a response handler.
 */
 static void fc_exch_reset(struct fc_exch *ep)
 {
 struct fc_seq *sp;
-void (*resp)(struct fc_seq *, struct fc_frame *, void *);
-void *arg;
 int rc = 1;
 
 spin_lock_bh(&ep->ex_lock);
 fc_exch_abort_locked(ep, 0);
 ep->state |= FC_EX_RST_CLEANUP;
 fc_exch_timer_cancel(ep);
-resp = ep->resp;
-ep->resp = NULL;
 if (ep->esb_stat & ESB_ST_REC_QUAL)
 atomic_dec(&ep->ex_refcnt); /* drop hold for rec_qual */
 ep->esb_stat &= ~ESB_ST_REC_QUAL;
-arg = ep->arg;
 sp = &ep->seq;
 rc = fc_exch_done_locked(ep);
 spin_unlock_bh(&ep->ex_lock);
 
+fc_exch_hold(ep);
+
 if (!rc)
 fc_exch_delete(ep);
 
-if (resp)
-resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
+fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED));
+fc_seq_set_resp(sp, NULL, ep->arg);
+fc_exch_release(ep);
 }
 
 /**
@@ -1956,13 +2032,13 @@ static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
 
 switch (op) {
 case ELS_LS_RJT:
-FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
+FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ\n");
 /* fall through */
 case ELS_LS_ACC:
 goto cleanup;
 default:
-FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
-"for RRQ", op);
+FC_EXCH_DBG(aborted_ep, "unexpected response op %x for RRQ\n",
+op);
 return;
 }
 
@@ -2533,13 +2609,8 @@ int fc_setup_exch_mgr(void)
 * cpu on which exchange originated by simple bitwise
 * AND operation between fc_cpu_mask and exchange id.
 */
-fc_cpu_mask = 1;
-fc_cpu_order = 0;
-while (fc_cpu_mask < nr_cpu_ids) {
-fc_cpu_mask <<= 1;
-fc_cpu_order++;
-}
-fc_cpu_mask--;
+fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
+fc_cpu_mask = (1 << fc_cpu_order) - 1;
 
 fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
 if (!fc_exch_workqueue)
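The fc_setup_exch_mgr() hunk above swaps the open-coded doubling loop for ilog2(roundup_pow_of_two(nr_cpu_ids)). Both forms pick the smallest power-of-two order that covers nr_cpu_ids and derive fc_cpu_mask from it. A quick userspace check of that equivalence; ilog2()/roundup_pow_of_two() are open-coded here, so this is only a sketch of the arithmetic, not kernel code:

/* Illustration only: compare the removed loop with the ilog2-based form. */
#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (unsigned int nr_cpu_ids = 1; nr_cpu_ids <= 4096; nr_cpu_ids++) {
		/* Old form: grow a power-of-two value until it covers nr_cpu_ids. */
		unsigned int old_mask = 1, old_order = 0;
		while (old_mask < nr_cpu_ids) {
			old_mask <<= 1;
			old_order++;
		}
		old_mask--;

		/* New form: ilog2(roundup_pow_of_two(nr_cpu_ids)), open-coded. */
		unsigned int new_order = 0;
		while ((1u << new_order) < nr_cpu_ids)
			new_order++;
		unsigned int new_mask = (1u << new_order) - 1;

		assert(old_order == new_order && old_mask == new_mask);
	}
	printf("old and new fc_cpu_order/fc_cpu_mask computations agree\n");
	return 0;
}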
@@ -902,7 +902,8 @@ static void fc_fcp_resp(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
 /*
 * Check for missing or extra data frames.
 */
-if (unlikely(fsp->xfer_len != expected_len)) {
+if (unlikely(fsp->cdb_status == SAM_STAT_GOOD &&
+fsp->xfer_len != expected_len)) {
 if (fsp->xfer_len < expected_len) {
 /*
 * Some data may be queued locally,
@@ -955,12 +956,11 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *fsp)
 * Test for transport underrun, independent of response
 * underrun status.
 */
-if (fsp->xfer_len < fsp->data_len && !fsp->io_status &&
+if (fsp->cdb_status == SAM_STAT_GOOD &&
+fsp->xfer_len < fsp->data_len && !fsp->io_status &&
 (!(fsp->scsi_comp_flags & FCP_RESID_UNDER) ||
-fsp->xfer_len < fsp->data_len - fsp->scsi_resid)) {
+fsp->xfer_len < fsp->data_len - fsp->scsi_resid))
 fsp->status_code = FC_DATA_UNDRUN;
-fsp->io_status = 0;
-}
 }
 
 seq = fsp->seq_ptr;
@@ -516,7 +516,7 @@ static void fc_lport_recv_rnid_req(struct fc_lport *lport,
 * @lport: The local port receiving the LOGO
 * @fp: The LOGO request frame
 *
-* Locking Note: The lport lock is exected to be held before calling
+* Locking Note: The lport lock is expected to be held before calling
 * this function.
 */
 static void fc_lport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
@@ -1088,7 +1088,7 @@ static void fc_lport_error(struct fc_lport *lport, struct fc_frame *fp)
 {
 unsigned long delay = 0;
 FC_LPORT_DBG(lport, "Error %ld in state %s, retries %d\n",
-PTR_ERR(fp), fc_lport_state(lport),
+IS_ERR(fp) ? -PTR_ERR(fp) : 0, fc_lport_state(lport),
 lport->retry_count);
 
 if (PTR_ERR(fp) == -FC_EX_CLOSED)
@@ -1705,7 +1705,7 @@ reject:
 * @rdata: The remote port that sent the PRLI request
 * @rx_fp: The PRLI request frame
 *
-* Locking Note: The rport lock is exected to be held before calling
+* Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
 static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
@@ -1824,7 +1824,7 @@ drop:
 * @rdata: The remote port that sent the PRLO request
 * @rx_fp: The PRLO request frame
 *
-* Locking Note: The rport lock is exected to be held before calling
+* Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
 static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
@@ -1895,7 +1895,7 @@ drop:
 * @lport: The local port that received the LOGO request
 * @fp: The LOGO request frame
 *
-* Locking Note: The rport lock is exected to be held before calling
+* Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
 static void fc_rport_recv_logo_req(struct fc_lport *lport, struct fc_frame *fp)
@@ -104,7 +104,7 @@ struct fc_esb {
 * esb_e_stat - flags from FC-FS-2 T11/1619-D Rev 0.90.
 */
 #define ESB_ST_RESP (1 << 31) /* responder to exchange */
-#define ESB_ST_SEQ_INIT (1 << 30) /* port holds sequence initiaive */
+#define ESB_ST_SEQ_INIT (1 << 30) /* port holds sequence initiative */
 #define ESB_ST_COMPLETE (1 << 29) /* exchange is complete */
 #define ESB_ST_ABNORMAL (1 << 28) /* abnormal ending condition */
 #define ESB_ST_REC_QUAL (1 << 26) /* recovery qualifier active */
@@ -410,6 +410,12 @@ struct fc_seq {
 * @fh_type: The frame type
 * @class: The class of service
 * @seq: The sequence in use on this exchange
+* @resp_active: Number of tasks that are concurrently executing @resp().
+* @resp_task: If @resp_active > 0, either the task executing @resp(), the
+* task that has been interrupted to execute the soft-IRQ
+* executing @resp() or NULL if more than one task is executing
+* @resp concurrently.
+* @resp_wq: Waitqueue for the tasks waiting on @resp_active.
 * @resp: Callback for responses on this exchange
 * @destructor: Called when destroying the exchange
 * @arg: Passed as a void pointer to the resp() callback
@@ -441,6 +447,9 @@ struct fc_exch {
 u32 r_a_tov;
 u32 f_ctl;
 struct fc_seq seq;
+int resp_active;
+struct task_struct *resp_task;
+wait_queue_head_t resp_wq;
 void (*resp)(struct fc_seq *, struct fc_frame *, void *);
 void *arg;
 void (*destructor)(struct fc_seq *, void *);
@@ -90,6 +90,7 @@ enum fip_state {
 * @lp: &fc_lport: libfc local port.
 * @sel_fcf: currently selected FCF, or NULL.
 * @fcfs: list of discovered FCFs.
+* @cdev: (Optional) pointer to sysfs fcoe_ctlr_device.
 * @fcf_count: number of discovered FCF entries.
 * @sol_time: time when a multicast solicitation was last sent.
 * @sel_time: time after which to select an FCF.
@@ -127,6 +128,7 @@ struct fcoe_ctlr {
 struct fc_lport *lp;
 struct fcoe_fcf *sel_fcf;
 struct list_head fcfs;
+struct fcoe_ctlr_device *cdev;
 u16 fcf_count;
 unsigned long sol_time;
 unsigned long sel_time;
@@ -168,8 +170,11 @@ static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr)
 return (void *)(ctlr + 1);
 }
 
+/*
+ * This assumes that the fcoe_ctlr (x) is allocated with the fcoe_ctlr_device.
+ */
 #define fcoe_ctlr_to_ctlr_dev(x) \
-(struct fcoe_ctlr_device *)(((struct fcoe_ctlr_device *)(x)) - 1)
+(x)->cdev
 
 /**
 * struct fcoe_fcf - Fibre-Channel Forwarder
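The libfcoe.h change above, together with the ctlr->cdev assignments added in fcoe_interface_create() and bnx2fc_interface_create(), replaces container-style pointer arithmetic with an explicit back-pointer, so libfcoe users such as fnic that never allocate a fcoe_ctlr_device can be detected with a simple NULL check. A small standalone sketch of the two layouts, using hypothetical struct names rather than the real fcoe types:

/* Illustration only: hypothetical structures, not the fcoe/libfcoe types.
 * The old macro assumed the controller always sat directly after its sysfs
 * device in one allocation; the new form stores a back-pointer that drivers
 * without a device simply leave NULL. */
#include <assert.h>
#include <stdlib.h>

struct ctlr_device { int id; };
struct ctlr { struct ctlr_device *cdev; int state; };

/* Old-style layout: device and controller in a single allocation. */
struct combined { struct ctlr_device dev; struct ctlr ctlr; };

int main(void)
{
	/* Driver that uses the sysfs device: back-pointer is filled in,
	 * mirroring "ctlr->cdev = ctlr_dev;" in the hunks above. */
	struct combined *c = calloc(1, sizeof(*c));
	c->ctlr.cdev = &c->dev;
	assert(c->ctlr.cdev == &c->dev);

	/* Driver that never allocates a device: back-pointer stays NULL,
	 * and callers test it instead of doing pointer arithmetic. */
	struct ctlr *bare = calloc(1, sizeof(*bare));
	assert(bare->cdev == NULL);

	free(c);
	free(bare);
	return 0;
}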