Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:

 1) The sockmap code has to free socket memory on close if there is
    corked data, from John Fastabend.

 2) Tunnel names coming from userspace need to be length validated. From
    Eric Dumazet.

 3) arp_filter() has to take VRFs properly into account, from Miguel
    Fadon Perlines.

 4) Fix oops in error path of tcf_bpf_init(), from Davide Caratti.

 5) Missing idr_remove() in u32_delete_key(), from Cong Wang.

 6) More syzbot stuff. Several use of uninitialized value fixes all
    over, from Eric Dumazet.

 7) Do not leak kernel memory to userspace in sctp, also from Eric
    Dumazet.

 8) Discard frames from unused ports in DSA, from Andrew Lunn.

 9) Fix DMA mapping and reset/failover problems in ibmvnic, from Thomas
    Falcon.

10) Do not access dp83640 PHY registers prematurely after reset, from
    Esben Haabendal.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (46 commits)
  vhost-net: set packet weight of tx polling to 2 * vq size
  net: thunderx: rework mac addresses list to u64 array
  inetpeer: fix uninit-value in inet_getpeer
  dp83640: Ensure against premature access to PHY registers after reset
  devlink: convert occ_get op to separate registration
  ARM: dts: ls1021a: Specify TBIPA register address
  net/fsl_pq_mdio: Allow explicit speficition of TBIPA address
  ibmvnic: Do not reset CRQ for Mobility driver resets
  ibmvnic: Fix failover case for non-redundant configuration
  ibmvnic: Fix reset scheduler error handling
  ibmvnic: Zero used TX descriptor counter on reset
  ibmvnic: Fix DMA mapping mistakes
  tipc: use the right skb in tipc_sk_fill_sock_diag()
  sctp: sctp_sockaddr_af must check minimal addr length for AF_INET6
  net: dsa: Discard frames from unused ports
  sctp: do not leak kernel memory to user space
  soreuseport: initialise timewait reuseport field
  ipv4: fix uninit-value in ip_route_output_key_hash_rcu()
  dccp: initialize ireq->ir_mark
  net: fix uninit-value in __hw_addr_add_ex()
  ...
This commit is contained in:
commit c18bb396d3
@@ -6,7 +6,11 @@ the definition of the PHY node in booting-without-of.txt for an example
 of how to define a PHY.
 
 Required properties:
-- reg : Offset and length of the register set for the device
+- reg : Offset and length of the register set for the device, and optionally
+        the offset and length of the TBIPA register (TBI PHY address
+        register). If TBIPA register is not specified, the driver will
+        attempt to infer it from the register set specified (your mileage may
+        vary).
 - compatible : Should define the compatible device type for the
   mdio. Currently supported strings/devices are:
   - "fsl,gianfar-tbi"
@@ -587,7 +587,8 @@
             device_type = "mdio";
             #address-cells = <1>;
             #size-cells = <0>;
-            reg = <0x0 0x2d24000 0x0 0x4000>;
+            reg = <0x0 0x2d24000 0x0 0x4000>,
+                  <0x0 0x2d10030 0x0 0x4>;
         };
 
         ptp_clock@2d10e00 {
@@ -158,16 +158,16 @@ static int alg_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
     void *private;
     int err;
 
-    /* If caller uses non-allowed flag, return error. */
-    if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
-        return -EINVAL;
-
     if (sock->state == SS_CONNECTED)
         return -EINVAL;
 
     if (addr_len < sizeof(*sa))
         return -EINVAL;
 
+    /* If caller uses non-allowed flag, return error. */
+    if ((sa->salg_feat & ~allowed) || (sa->salg_mask & ~allowed))
+        return -EINVAL;
+
     sa->salg_type[sizeof(sa->salg_type) - 1] = 0;
     sa->salg_name[sizeof(sa->salg_name) + addr_len - sizeof(*sa) - 1] = 0;
@@ -265,14 +265,9 @@ struct nicvf_drv_stats {
 
 struct cavium_ptp;
 
-struct xcast_addr {
-    struct list_head list;
-    u64 addr;
-};
-
 struct xcast_addr_list {
-    struct list_head list;
     int count;
+    u64 mc[];
 };
 
 struct nicvf_work {
@@ -1929,7 +1929,7 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
                           work.work);
     struct nicvf *nic = container_of(vf_work, struct nicvf, rx_mode_work);
     union nic_mbx mbx = {};
-    struct xcast_addr *xaddr, *next;
+    int idx;
 
     if (!vf_work)
         return;

@@ -1956,16 +1956,10 @@ static void nicvf_set_rx_mode_task(struct work_struct *work_arg)
     /* check if we have any specific MACs to be added to PF DMAC filter */
     if (vf_work->mc) {
         /* now go through kernel list of MACs and add them one by one */
-        list_for_each_entry_safe(xaddr, next,
-                                 &vf_work->mc->list, list) {
+        for (idx = 0; idx < vf_work->mc->count; idx++) {
             mbx.xcast.msg = NIC_MBOX_MSG_ADD_MCAST;
-            mbx.xcast.data.mac = xaddr->addr;
+            mbx.xcast.data.mac = vf_work->mc->mc[idx];
             nicvf_send_msg_to_pf(nic, &mbx);
-
-            /* after receiving ACK from PF release memory */
-            list_del(&xaddr->list);
-            kfree(xaddr);
-            vf_work->mc->count--;
         }
         kfree(vf_work->mc);
     }

@@ -1996,17 +1990,15 @@ static void nicvf_set_rx_mode(struct net_device *netdev)
             mode |= BGX_XCAST_MCAST_FILTER;
             /* here we need to copy mc addrs */
             if (netdev_mc_count(netdev)) {
-                struct xcast_addr *xaddr;
-
-                mc_list = kmalloc(sizeof(*mc_list), GFP_ATOMIC);
-                INIT_LIST_HEAD(&mc_list->list);
+                mc_list = kmalloc(offsetof(typeof(*mc_list),
+                                           mc[netdev_mc_count(netdev)]),
+                                  GFP_ATOMIC);
+                if (unlikely(!mc_list))
+                    return;
+                mc_list->count = 0;
                 netdev_hw_addr_list_for_each(ha, &netdev->mc) {
-                    xaddr = kmalloc(sizeof(*xaddr),
-                                    GFP_ATOMIC);
-                    xaddr->addr =
+                    mc_list->mc[mc_list->count] =
                         ether_addr_to_u64(ha->addr);
-                    list_add_tail(&xaddr->list,
-                                  &mc_list->list);
                     mc_list->count++;
                 }
             }
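The nicvf conversion above replaces per-address list nodes with one flexible-array allocation sized up front. A minimal sketch of that allocation pattern, with hypothetical names rather than the driver's own helpers:

    #include <linux/slab.h>
    #include <linux/stddef.h>

    struct mac_list {
        int count;
        u64 mc[];    /* flexible array member, sized at allocation time */
    };

    /* One kmalloc() covers the header plus n array slots; offsetof()
     * computes the total size, so no per-entry allocations or frees
     * are needed later.
     */
    static struct mac_list *mac_list_alloc(int n, gfp_t gfp)
    {
        struct mac_list *ml = kmalloc(offsetof(struct mac_list, mc[n]), gfp);

        if (ml)
            ml->count = 0;
        return ml;
    }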
@@ -377,6 +377,38 @@ static const struct of_device_id fsl_pq_mdio_match[] = {
 };
 MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
 
+static void set_tbipa(const u32 tbipa_val, struct platform_device *pdev,
+                      uint32_t __iomem * (*get_tbipa)(void __iomem *),
+                      void __iomem *reg_map, struct resource *reg_res)
+{
+    struct device_node *np = pdev->dev.of_node;
+    uint32_t __iomem *tbipa;
+    bool tbipa_mapped;
+
+    tbipa = of_iomap(np, 1);
+    if (tbipa) {
+        tbipa_mapped = true;
+    } else {
+        tbipa_mapped = false;
+        tbipa = (*get_tbipa)(reg_map);
+
+        /*
+         * Add consistency check to make sure TBI is contained within
+         * the mapped range (not because we would get a segfault,
+         * rather to catch bugs in computing TBI address). Print error
+         * message but continue anyway.
+         */
+        if ((void *)tbipa > reg_map + resource_size(reg_res) - 4)
+            dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
+                    ((void *)tbipa - reg_map) + 4);
+    }
+
+    iowrite32be(be32_to_cpu(tbipa_val), tbipa);
+
+    if (tbipa_mapped)
+        iounmap(tbipa);
+}
+
 static int fsl_pq_mdio_probe(struct platform_device *pdev)
 {
     const struct of_device_id *id =

@@ -450,8 +482,6 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
 
         if (tbi) {
             const u32 *prop = of_get_property(tbi, "reg", NULL);
-            uint32_t __iomem *tbipa;
-
             if (!prop) {
                 dev_err(&pdev->dev,
                         "missing 'reg' property in node %pOF\n",

@@ -459,20 +489,8 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev)
                 err = -EBUSY;
                 goto error;
             }
-
-            tbipa = data->get_tbipa(priv->map);
-
-            /*
-             * Add consistency check to make sure TBI is contained
-             * within the mapped range (not because we would get a
-             * segfault, rather to catch bugs in computing TBI
-             * address). Print error message but continue anyway.
-             */
-            if ((void *)tbipa > priv->map + resource_size(&res) - 4)
-                dev_err(&pdev->dev, "invalid register map (should be at least 0x%04zx to contain TBI address)\n",
-                        ((void *)tbipa - priv->map) + 4);
-
-            iowrite32be(be32_to_cpup(prop), tbipa);
+            set_tbipa(*prop, pdev,
+                      data->get_tbipa, priv->map, &res);
         }
     }
@@ -118,6 +118,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter);
 static int ibmvnic_init(struct ibmvnic_adapter *);
 static void release_crq_queue(struct ibmvnic_adapter *);
 static int __ibmvnic_set_mac(struct net_device *netdev, struct sockaddr *p);
+static int init_crq_queue(struct ibmvnic_adapter *adapter);
 
 struct ibmvnic_stat {
     char name[ETH_GSTRING_LEN];

@@ -320,18 +321,16 @@ failure:
     dev_info(dev, "replenish pools failure\n");
     pool->free_map[pool->next_free] = index;
     pool->rx_buff[index].skb = NULL;
-    if (!dma_mapping_error(dev, dma_addr))
-        dma_unmap_single(dev, dma_addr, pool->buff_size,
-                         DMA_FROM_DEVICE);
 
     dev_kfree_skb_any(skb);
     adapter->replenish_add_buff_failure++;
     atomic_add(buffers_added, &pool->available);
 
-    if (lpar_rc == H_CLOSED) {
+    if (lpar_rc == H_CLOSED || adapter->failover_pending) {
         /* Disable buffer pool replenishment and report carrier off if
-         * queue is closed. Firmware guarantees that a signal will
-         * be sent to the driver, triggering a reset.
+         * queue is closed or pending failover.
+         * Firmware guarantees that a signal will be sent to the
+         * driver, triggering a reset.
          */
         deactivate_rx_pools(adapter);
         netif_carrier_off(adapter->netdev);
@@ -1071,6 +1070,14 @@ static int ibmvnic_open(struct net_device *netdev)
     struct ibmvnic_adapter *adapter = netdev_priv(netdev);
     int rc;
 
+    /* If device failover is pending, just set device state and return.
+     * Device operation will be handled by reset routine.
+     */
+    if (adapter->failover_pending) {
+        adapter->state = VNIC_OPEN;
+        return 0;
+    }
+
     mutex_lock(&adapter->reset_lock);
 
     if (adapter->state != VNIC_CLOSED) {

@@ -1218,7 +1225,6 @@ static int __ibmvnic_close(struct net_device *netdev)
     rc = set_link_state(adapter, IBMVNIC_LOGICAL_LNK_DN);
     if (rc)
         return rc;
-    ibmvnic_cleanup(netdev);
     adapter->state = VNIC_CLOSED;
     return 0;
 }

@@ -1228,8 +1234,17 @@ static int ibmvnic_close(struct net_device *netdev)
     struct ibmvnic_adapter *adapter = netdev_priv(netdev);
     int rc;
 
+    /* If device failover is pending, just set device state and return.
+     * Device operation will be handled by reset routine.
+     */
+    if (adapter->failover_pending) {
+        adapter->state = VNIC_CLOSED;
+        return 0;
+    }
+
     mutex_lock(&adapter->reset_lock);
     rc = __ibmvnic_close(netdev);
+    ibmvnic_cleanup(netdev);
     mutex_unlock(&adapter->reset_lock);
 
     return rc;
@@ -1562,8 +1577,9 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
         dev_kfree_skb_any(skb);
         tx_buff->skb = NULL;
 
-        if (lpar_rc == H_CLOSED) {
-            /* Disable TX and report carrier off if queue is closed.
+        if (lpar_rc == H_CLOSED || adapter->failover_pending) {
+            /* Disable TX and report carrier off if queue is closed
+             * or pending failover.
              * Firmware guarantees that a signal will be sent to the
              * driver, triggering a reset or some other action.
              */

@@ -1711,14 +1727,10 @@ static int do_reset(struct ibmvnic_adapter *adapter,
     old_num_rx_queues = adapter->req_rx_queues;
     old_num_tx_queues = adapter->req_tx_queues;
 
-    if (rwi->reset_reason == VNIC_RESET_MOBILITY) {
-        rc = ibmvnic_reenable_crq_queue(adapter);
-        if (rc)
-            return 0;
-        ibmvnic_cleanup(netdev);
-    } else if (rwi->reset_reason == VNIC_RESET_FAILOVER) {
-        ibmvnic_cleanup(netdev);
-    } else {
-        ibmvnic_cleanup(netdev);
+    ibmvnic_cleanup(netdev);
 
+    if (adapter->reset_reason != VNIC_RESET_MOBILITY &&
+        adapter->reset_reason != VNIC_RESET_FAILOVER) {
         rc = __ibmvnic_close(netdev);
         if (rc)
             return rc;

@@ -1737,6 +1749,23 @@ static int do_reset(struct ibmvnic_adapter *adapter,
          */
         adapter->state = VNIC_PROBED;
 
+        if (adapter->wait_for_reset) {
+            rc = init_crq_queue(adapter);
+        } else if (adapter->reset_reason == VNIC_RESET_MOBILITY) {
+            rc = ibmvnic_reenable_crq_queue(adapter);
+            release_sub_crqs(adapter, 1);
+        } else {
+            rc = ibmvnic_reset_crq(adapter);
+            if (!rc)
+                rc = vio_enable_interrupts(adapter->vdev);
+        }
+
+        if (rc) {
+            netdev_err(adapter->netdev,
+                       "Couldn't initialize crq. rc=%d\n", rc);
+            return rc;
+        }
+
         rc = ibmvnic_init(adapter);
         if (rc)
             return IBMVNIC_INIT_FAILED;
@@ -1878,23 +1907,26 @@ static void __ibmvnic_reset(struct work_struct *work)
     mutex_unlock(&adapter->reset_lock);
 }
 
-static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
-                          enum ibmvnic_reset_reason reason)
+static int ibmvnic_reset(struct ibmvnic_adapter *adapter,
+                         enum ibmvnic_reset_reason reason)
 {
     struct ibmvnic_rwi *rwi, *tmp;
     struct net_device *netdev = adapter->netdev;
     struct list_head *entry;
+    int ret;
 
     if (adapter->state == VNIC_REMOVING ||
-        adapter->state == VNIC_REMOVED) {
-        netdev_dbg(netdev, "Adapter removing, skipping reset\n");
-        return;
+        adapter->state == VNIC_REMOVED ||
+        adapter->failover_pending) {
+        ret = EBUSY;
+        netdev_dbg(netdev, "Adapter removing or pending failover, skipping reset\n");
+        goto err;
     }
 
     if (adapter->state == VNIC_PROBING) {
         netdev_warn(netdev, "Adapter reset during probe\n");
-        adapter->init_done_rc = EAGAIN;
-        return;
+        ret = adapter->init_done_rc = EAGAIN;
+        goto err;
     }
 
     mutex_lock(&adapter->rwi_lock);

@@ -1904,7 +1936,8 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
         if (tmp->reset_reason == reason) {
             netdev_dbg(netdev, "Skipping matching reset\n");
             mutex_unlock(&adapter->rwi_lock);
-            return;
+            ret = EBUSY;
+            goto err;
         }
     }

@@ -1912,7 +1945,8 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
     if (!rwi) {
         mutex_unlock(&adapter->rwi_lock);
-        ibmvnic_close(netdev);
-        return;
+        ret = ENOMEM;
+        goto err;
     }
 
     rwi->reset_reason = reason;

@@ -1921,6 +1955,12 @@ static void ibmvnic_reset(struct ibmvnic_adapter *adapter,
 
     netdev_dbg(adapter->netdev, "Scheduling reset (reason %d)\n", reason);
     schedule_work(&adapter->ibmvnic_reset);
+
+    return 0;
+err:
+    if (adapter->wait_for_reset)
+        adapter->wait_for_reset = false;
+    return -ret;
 }
 
 static void ibmvnic_tx_timeout(struct net_device *dev)
@@ -2055,6 +2095,8 @@ static void ibmvnic_netpoll_controller(struct net_device *dev)
 
 static int wait_for_reset(struct ibmvnic_adapter *adapter)
 {
+    int rc, ret;
+
     adapter->fallback.mtu = adapter->req_mtu;
     adapter->fallback.rx_queues = adapter->req_rx_queues;
     adapter->fallback.tx_queues = adapter->req_tx_queues;

@@ -2062,11 +2104,15 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
     adapter->fallback.tx_entries = adapter->req_tx_entries_per_subcrq;
 
     init_completion(&adapter->reset_done);
-    ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
     adapter->wait_for_reset = true;
+    rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+    if (rc)
+        return rc;
     wait_for_completion(&adapter->reset_done);
 
+    ret = 0;
     if (adapter->reset_done_rc) {
+        ret = -EIO;
         adapter->desired.mtu = adapter->fallback.mtu;
         adapter->desired.rx_queues = adapter->fallback.rx_queues;
         adapter->desired.tx_queues = adapter->fallback.tx_queues;

@@ -2074,12 +2120,15 @@ static int wait_for_reset(struct ibmvnic_adapter *adapter)
         adapter->desired.tx_entries = adapter->fallback.tx_entries;
 
         init_completion(&adapter->reset_done);
-        ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
         adapter->wait_for_reset = true;
+        rc = ibmvnic_reset(adapter, VNIC_RESET_CHANGE_PARAM);
+        if (rc)
+            return ret;
         wait_for_completion(&adapter->reset_done);
     }
     adapter->wait_for_reset = false;
 
-    return adapter->reset_done_rc;
+    return ret;
 }
 
 static int ibmvnic_change_mtu(struct net_device *netdev, int new_mtu)
@@ -2364,6 +2413,7 @@ static int reset_one_sub_crq_queue(struct ibmvnic_adapter *adapter,
     }
 
     memset(scrq->msgs, 0, 4 * PAGE_SIZE);
+    atomic_set(&scrq->used, 0);
     scrq->cur = 0;
 
     rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,

@@ -2574,7 +2624,7 @@ static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
     union sub_crq *next;
     int index;
     int i, j;
-    u8 first;
+    u8 *first;
 
 restart_loop:
     while (pending_scrq(adapter, scrq)) {

@@ -2605,11 +2655,12 @@ restart_loop:
                 txbuff->data_dma[j] = 0;
             }
             /* if sub_crq was sent indirectly */
-            first = txbuff->indir_arr[0].generic.first;
-            if (first == IBMVNIC_CRQ_CMD) {
+            first = &txbuff->indir_arr[0].generic.first;
+            if (*first == IBMVNIC_CRQ_CMD) {
                 dma_unmap_single(dev, txbuff->indir_dma,
                                  sizeof(txbuff->indir_arr),
                                  DMA_TO_DEVICE);
+                *first = 0;
             }
 
             if (txbuff->last_frag) {
@@ -3882,9 +3933,9 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
     int i;
 
     dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
-                     DMA_BIDIRECTIONAL);
+                     DMA_TO_DEVICE);
     dma_unmap_single(dev, adapter->login_rsp_buf_token,
-                     adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
+                     adapter->login_rsp_buf_sz, DMA_FROM_DEVICE);
 
     /* If the number of queues requested can't be allocated by the
      * server, the login response will return with code 1. We will need

@@ -4144,7 +4195,9 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
         case IBMVNIC_CRQ_INIT:
             dev_info(dev, "Partner initialized\n");
             adapter->from_passive_init = true;
+            adapter->failover_pending = false;
             complete(&adapter->init_done);
+            ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
             break;
         case IBMVNIC_CRQ_INIT_COMPLETE:
             dev_info(dev, "Partner initialization complete\n");

@@ -4161,7 +4214,7 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
             ibmvnic_reset(adapter, VNIC_RESET_MOBILITY);
         } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
             dev_info(dev, "Backing device failover detected\n");
-            ibmvnic_reset(adapter, VNIC_RESET_FAILOVER);
+            adapter->failover_pending = true;
         } else {
             /* The adapter lost the connection */
             dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
@@ -4461,19 +4514,6 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
     u64 old_num_rx_queues, old_num_tx_queues;
     int rc;
 
-    if (adapter->resetting && !adapter->wait_for_reset) {
-        rc = ibmvnic_reset_crq(adapter);
-        if (!rc)
-            rc = vio_enable_interrupts(adapter->vdev);
-    } else {
-        rc = init_crq_queue(adapter);
-    }
-
-    if (rc) {
-        dev_err(dev, "Couldn't initialize crq. rc=%d\n", rc);
-        return rc;
-    }
-
     adapter->from_passive_init = false;
 
     old_num_rx_queues = adapter->req_rx_queues;

@@ -4498,7 +4538,8 @@ static int ibmvnic_init(struct ibmvnic_adapter *adapter)
         return -1;
     }
 
-    if (adapter->resetting && !adapter->wait_for_reset) {
+    if (adapter->resetting && !adapter->wait_for_reset &&
+        adapter->reset_reason != VNIC_RESET_MOBILITY) {
         if (adapter->req_rx_queues != old_num_rx_queues ||
             adapter->req_tx_queues != old_num_tx_queues) {
             release_sub_crqs(adapter, 0);

@@ -4586,6 +4627,13 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
     adapter->mac_change_pending = false;
 
     do {
+        rc = init_crq_queue(adapter);
+        if (rc) {
+            dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n",
+                    rc);
+            goto ibmvnic_init_fail;
+        }
+
         rc = ibmvnic_init(adapter);
         if (rc && rc != EAGAIN)
             goto ibmvnic_init_fail;
@@ -1108,6 +1108,7 @@ struct ibmvnic_adapter {
     bool napi_enabled, from_passive_init;
 
     bool mac_change_pending;
+    bool failover_pending;
 
     struct ibmvnic_tunables desired;
     struct ibmvnic_tunables fallback;
@@ -468,8 +468,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
     mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp);
     mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL);
 
-    if (!mac_buf)
+    if (!mac_buf) {
+        status = ICE_ERR_NO_MEMORY;
         goto err_unroll_fltr_mgmt_struct;
+    }
 
     status = ice_aq_manage_mac_read(hw, mac_buf, mac_buf_len, NULL);
     devm_kfree(ice_hw_to_dev(hw), mac_buf);
@@ -156,7 +156,7 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
 
 static int ice_get_regs_len(struct net_device __always_unused *netdev)
 {
-    return ARRAY_SIZE(ice_regs_dump_list);
+    return sizeof(ice_regs_dump_list);
 }
 
 static void

@@ -170,7 +170,7 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
 
     regs->version = 1;
 
-    for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list) / sizeof(u32); ++i)
+    for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
         regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
 }
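The two ice fixes above hinge on a units distinction: ethtool's get_regs_len must report bytes, while the fill loop must count elements. A hedged reminder of that distinction (array contents illustrative):

    static const u32 regs_dump_list[] = { 0x00, 0x04, 0x08 };

    /* sizeof() yields total bytes (12 here); ARRAY_SIZE() yields the
     * element count (3). Mixing them up under- or over-runs the buffer
     * handed back to userspace.
     */
    size_t regs_len_bytes = sizeof(regs_dump_list);     /* for get_regs_len */
    size_t regs_count = ARRAY_SIZE(regs_dump_list);     /* for the fill loop */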
@@ -1604,7 +1604,7 @@ static int mvpp2_prs_init_from_hw(struct mvpp2 *priv,
 {
     int i;
 
-    if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+    if (tid > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
         return -EINVAL;
 
     memset(pe, 0, sizeof(*pe));
@@ -3805,18 +3805,6 @@ static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
     },
 };
 
-static u64 mlxsw_sp_resource_kvd_linear_occ_get(struct devlink *devlink)
-{
-    struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
-    struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
-
-    return mlxsw_sp_kvdl_occ_get(mlxsw_sp);
-}
-
-static const struct devlink_resource_ops mlxsw_sp_resource_kvd_linear_ops = {
-    .occ_get = mlxsw_sp_resource_kvd_linear_occ_get,
-};
-
 static void
 mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
                                       struct devlink_resource_size_params *kvd_size_params,

@@ -3877,8 +3865,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
     err = devlink_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
                                     kvd_size, MLXSW_SP_RESOURCE_KVD,
                                     DEVLINK_RESOURCE_ID_PARENT_TOP,
-                                    &kvd_size_params,
-                                    NULL);
+                                    &kvd_size_params);
     if (err)
         return err;

@@ -3887,8 +3874,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
                                     linear_size,
                                     MLXSW_SP_RESOURCE_KVD_LINEAR,
                                     MLXSW_SP_RESOURCE_KVD,
-                                    &linear_size_params,
-                                    &mlxsw_sp_resource_kvd_linear_ops);
+                                    &linear_size_params);
     if (err)
         return err;

@@ -3905,8 +3891,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
                                     double_size,
                                     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
                                     MLXSW_SP_RESOURCE_KVD,
-                                    &hash_double_size_params,
-                                    NULL);
+                                    &hash_double_size_params);
     if (err)
         return err;

@@ -3915,8 +3900,7 @@ static int mlxsw_sp_resources_register(struct mlxsw_core *mlxsw_core)
                                     single_size,
                                     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
                                     MLXSW_SP_RESOURCE_KVD,
-                                    &hash_single_size_params,
-                                    NULL);
+                                    &hash_single_size_params);
     if (err)
         return err;
@@ -442,7 +442,6 @@ void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
 int mlxsw_sp_kvdl_alloc_size_query(struct mlxsw_sp *mlxsw_sp,
                                    unsigned int entry_count,
                                    unsigned int *p_alloc_size);
-u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp);
 int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core);
 
 struct mlxsw_sp_acl_rule_info {
@@ -315,8 +315,9 @@ static u64 mlxsw_sp_kvdl_part_occ(struct mlxsw_sp_kvdl_part *part)
     return occ;
 }
 
-u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
+static u64 mlxsw_sp_kvdl_occ_get(void *priv)
 {
+    const struct mlxsw_sp *mlxsw_sp = priv;
     u64 occ = 0;
     int i;

@@ -326,48 +327,33 @@ u64 mlxsw_sp_kvdl_occ_get(const struct mlxsw_sp *mlxsw_sp)
     return occ;
 }
 
-static u64 mlxsw_sp_kvdl_single_occ_get(struct devlink *devlink)
+static u64 mlxsw_sp_kvdl_single_occ_get(void *priv)
 {
-    struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
-    struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+    const struct mlxsw_sp *mlxsw_sp = priv;
     struct mlxsw_sp_kvdl_part *part;
 
     part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_SINGLE];
     return mlxsw_sp_kvdl_part_occ(part);
 }
 
-static u64 mlxsw_sp_kvdl_chunks_occ_get(struct devlink *devlink)
+static u64 mlxsw_sp_kvdl_chunks_occ_get(void *priv)
 {
-    struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
-    struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+    const struct mlxsw_sp *mlxsw_sp = priv;
     struct mlxsw_sp_kvdl_part *part;
 
     part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_CHUNKS];
     return mlxsw_sp_kvdl_part_occ(part);
 }
 
-static u64 mlxsw_sp_kvdl_large_chunks_occ_get(struct devlink *devlink)
+static u64 mlxsw_sp_kvdl_large_chunks_occ_get(void *priv)
 {
-    struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
-    struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
+    const struct mlxsw_sp *mlxsw_sp = priv;
     struct mlxsw_sp_kvdl_part *part;
 
     part = mlxsw_sp->kvdl->parts[MLXSW_SP_KVDL_PART_ID_LARGE_CHUNKS];
     return mlxsw_sp_kvdl_part_occ(part);
 }
 
-static const struct devlink_resource_ops mlxsw_sp_kvdl_single_ops = {
-    .occ_get = mlxsw_sp_kvdl_single_occ_get,
-};
-
-static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_ops = {
-    .occ_get = mlxsw_sp_kvdl_chunks_occ_get,
-};
-
-static const struct devlink_resource_ops mlxsw_sp_kvdl_chunks_large_ops = {
-    .occ_get = mlxsw_sp_kvdl_large_chunks_occ_get,
-};
-
 int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
 {
     struct devlink *devlink = priv_to_devlink(mlxsw_core);
@@ -386,8 +372,7 @@ int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
                                     MLXSW_SP_KVDL_SINGLE_SIZE,
                                     MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
                                     MLXSW_SP_RESOURCE_KVD_LINEAR,
-                                    &size_params,
-                                    &mlxsw_sp_kvdl_single_ops);
+                                    &size_params);
     if (err)
         return err;

@@ -398,8 +383,7 @@ int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
                                     MLXSW_SP_KVDL_CHUNKS_SIZE,
                                     MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
                                     MLXSW_SP_RESOURCE_KVD_LINEAR,
-                                    &size_params,
-                                    &mlxsw_sp_kvdl_chunks_ops);
+                                    &size_params);
     if (err)
         return err;

@@ -410,13 +394,13 @@ int mlxsw_sp_kvdl_resources_register(struct mlxsw_core *mlxsw_core)
                                     MLXSW_SP_KVDL_LARGE_CHUNKS_SIZE,
                                     MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
                                     MLXSW_SP_RESOURCE_KVD_LINEAR,
-                                    &size_params,
-                                    &mlxsw_sp_kvdl_chunks_large_ops);
+                                    &size_params);
     return err;
 }
 
 int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
 {
+    struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
     struct mlxsw_sp_kvdl *kvdl;
     int err;
@@ -429,6 +413,23 @@ int mlxsw_sp_kvdl_init(struct mlxsw_sp *mlxsw_sp)
     if (err)
         goto err_kvdl_parts_init;
 
+    devlink_resource_occ_get_register(devlink,
+                                      MLXSW_SP_RESOURCE_KVD_LINEAR,
+                                      mlxsw_sp_kvdl_occ_get,
+                                      mlxsw_sp);
+    devlink_resource_occ_get_register(devlink,
+                                      MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE,
+                                      mlxsw_sp_kvdl_single_occ_get,
+                                      mlxsw_sp);
+    devlink_resource_occ_get_register(devlink,
+                                      MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS,
+                                      mlxsw_sp_kvdl_chunks_occ_get,
+                                      mlxsw_sp);
+    devlink_resource_occ_get_register(devlink,
+                                      MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS,
+                                      mlxsw_sp_kvdl_large_chunks_occ_get,
+                                      mlxsw_sp);
+
     return 0;
 
 err_kvdl_parts_init:

@@ -438,6 +439,16 @@ err_kvdl_parts_init:
 
 void mlxsw_sp_kvdl_fini(struct mlxsw_sp *mlxsw_sp)
 {
+    struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
+
+    devlink_resource_occ_get_unregister(devlink,
+                                        MLXSW_SP_RESOURCE_KVD_LINEAR_LARGE_CHUNKS);
+    devlink_resource_occ_get_unregister(devlink,
+                                        MLXSW_SP_RESOURCE_KVD_LINEAR_CHUNKS);
+    devlink_resource_occ_get_unregister(devlink,
+                                        MLXSW_SP_RESOURCE_KVD_LINEAR_SINGLE);
+    devlink_resource_occ_get_unregister(devlink,
+                                        MLXSW_SP_RESOURCE_KVD_LINEAR);
     mlxsw_sp_kvdl_parts_fini(mlxsw_sp);
     kfree(mlxsw_sp->kvdl);
 }
@@ -109,11 +109,11 @@ static void free_netvsc_device_rcu(struct netvsc_device *nvdev)
     call_rcu(&nvdev->rcu, free_netvsc_device);
 }
 
-static void netvsc_revoke_buf(struct hv_device *device,
-                              struct netvsc_device *net_device)
+static void netvsc_revoke_recv_buf(struct hv_device *device,
+                                   struct netvsc_device *net_device,
+                                   struct net_device *ndev)
 {
     struct nvsp_message *revoke_packet;
-    struct net_device *ndev = hv_get_drvdata(device);
     int ret;
 
     /*

@@ -157,6 +157,14 @@ static void netvsc_revoke_buf(struct hv_device *device,
         }
         net_device->recv_section_cnt = 0;
     }
+}
+
+static void netvsc_revoke_send_buf(struct hv_device *device,
+                                   struct netvsc_device *net_device,
+                                   struct net_device *ndev)
+{
+    struct nvsp_message *revoke_packet;
+    int ret;
 
     /* Deal with the send buffer we may have setup.
      * If we got a send section size, it means we received a

@@ -202,10 +210,10 @@ static void netvsc_revoke_buf(struct hv_device *device,
         }
     }
 
-static void netvsc_teardown_gpadl(struct hv_device *device,
-                                  struct netvsc_device *net_device)
+static void netvsc_teardown_recv_gpadl(struct hv_device *device,
+                                       struct netvsc_device *net_device,
+                                       struct net_device *ndev)
 {
-    struct net_device *ndev = hv_get_drvdata(device);
     int ret;
 
     if (net_device->recv_buf_gpadl_handle) {

@@ -222,6 +230,13 @@ static void netvsc_teardown_gpadl(struct hv_device *device,
         }
         net_device->recv_buf_gpadl_handle = 0;
     }
+}
+
+static void netvsc_teardown_send_gpadl(struct hv_device *device,
+                                       struct netvsc_device *net_device,
+                                       struct net_device *ndev)
+{
+    int ret;
 
     if (net_device->send_buf_gpadl_handle) {
         ret = vmbus_teardown_gpadl(device->channel,
@@ -437,8 +452,10 @@ static int netvsc_init_buf(struct hv_device *device,
     goto exit;
 
 cleanup:
-    netvsc_revoke_buf(device, net_device);
-    netvsc_teardown_gpadl(device, net_device);
+    netvsc_revoke_recv_buf(device, net_device, ndev);
+    netvsc_revoke_send_buf(device, net_device, ndev);
+    netvsc_teardown_recv_gpadl(device, net_device, ndev);
+    netvsc_teardown_send_gpadl(device, net_device, ndev);
 
 exit:
     return ret;

@@ -457,7 +474,6 @@ static int negotiate_nvsp_ver(struct hv_device *device,
     init_packet->hdr.msg_type = NVSP_MSG_TYPE_INIT;
     init_packet->msg.init_msg.init.min_protocol_ver = nvsp_ver;
     init_packet->msg.init_msg.init.max_protocol_ver = nvsp_ver;
-
     trace_nvsp_send(ndev, init_packet);
 
     /* Send the init request */

@@ -575,7 +591,17 @@ void netvsc_device_remove(struct hv_device *device)
         = rtnl_dereference(net_device_ctx->nvdev);
     int i;
 
-    netvsc_revoke_buf(device, net_device);
+    /*
+     * Revoke receive buffer. If host is pre-Win2016 then tear down
+     * receive buffer GPADL. Do the same for send buffer.
+     */
+    netvsc_revoke_recv_buf(device, net_device, ndev);
+    if (vmbus_proto_version < VERSION_WIN10)
+        netvsc_teardown_recv_gpadl(device, net_device, ndev);
+
+    netvsc_revoke_send_buf(device, net_device, ndev);
+    if (vmbus_proto_version < VERSION_WIN10)
+        netvsc_teardown_send_gpadl(device, net_device, ndev);
 
     RCU_INIT_POINTER(net_device_ctx->nvdev, NULL);

@@ -589,15 +615,17 @@ void netvsc_device_remove(struct hv_device *device)
      */
     netdev_dbg(ndev, "net device safe to remove\n");
 
-    /* older versions require that buffer be revoked before close */
-    if (net_device->nvsp_version < NVSP_PROTOCOL_VERSION_4)
-        netvsc_teardown_gpadl(device, net_device);
-
     /* Now, we can close the channel safely */
     vmbus_close(device->channel);
 
-    if (net_device->nvsp_version >= NVSP_PROTOCOL_VERSION_4)
-        netvsc_teardown_gpadl(device, net_device);
+    /*
+     * If host is Win2016 or higher then we do the GPADL tear down
+     * here after VMBus is closed.
+     */
+    if (vmbus_proto_version >= VERSION_WIN10) {
+        netvsc_teardown_recv_gpadl(device, net_device, ndev);
+        netvsc_teardown_send_gpadl(device, net_device, ndev);
+    }
 
     /* Release all resources */
     free_netvsc_device_rcu(net_device);
@@ -30,52 +30,36 @@ static struct net *nsim_devlink_net(struct devlink *devlink)
 
 /* IPv4
  */
-static u64 nsim_ipv4_fib_resource_occ_get(struct devlink *devlink)
+static u64 nsim_ipv4_fib_resource_occ_get(void *priv)
 {
-    struct net *net = nsim_devlink_net(devlink);
+    struct net *net = priv;
 
     return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, false);
 }
 
-static struct devlink_resource_ops nsim_ipv4_fib_res_ops = {
-    .occ_get = nsim_ipv4_fib_resource_occ_get,
-};
-
-static u64 nsim_ipv4_fib_rules_res_occ_get(struct devlink *devlink)
+static u64 nsim_ipv4_fib_rules_res_occ_get(void *priv)
 {
-    struct net *net = nsim_devlink_net(devlink);
+    struct net *net = priv;
 
     return nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, false);
 }
 
-static struct devlink_resource_ops nsim_ipv4_fib_rules_res_ops = {
-    .occ_get = nsim_ipv4_fib_rules_res_occ_get,
-};
-
 /* IPv6
  */
-static u64 nsim_ipv6_fib_resource_occ_get(struct devlink *devlink)
+static u64 nsim_ipv6_fib_resource_occ_get(void *priv)
 {
-    struct net *net = nsim_devlink_net(devlink);
+    struct net *net = priv;
 
     return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, false);
 }
 
-static struct devlink_resource_ops nsim_ipv6_fib_res_ops = {
-    .occ_get = nsim_ipv6_fib_resource_occ_get,
-};
-
-static u64 nsim_ipv6_fib_rules_res_occ_get(struct devlink *devlink)
+static u64 nsim_ipv6_fib_rules_res_occ_get(void *priv)
 {
-    struct net *net = nsim_devlink_net(devlink);
+    struct net *net = priv;
 
     return nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, false);
 }
 
-static struct devlink_resource_ops nsim_ipv6_fib_rules_res_ops = {
-    .occ_get = nsim_ipv6_fib_rules_res_occ_get,
-};
-
 static int devlink_resources_register(struct devlink *devlink)
 {
     struct devlink_resource_size_params params = {
@@ -91,7 +75,7 @@ static int devlink_resources_register(struct devlink *devlink)
     err = devlink_resource_register(devlink, "IPv4", (u64)-1,
                                     NSIM_RESOURCE_IPV4,
                                     DEVLINK_RESOURCE_ID_PARENT_TOP,
-                                    &params, NULL);
+                                    &params);
     if (err) {
         pr_err("Failed to register IPv4 top resource\n");
         goto out;

@@ -100,8 +84,7 @@ static int devlink_resources_register(struct devlink *devlink)
     n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB, true);
     err = devlink_resource_register(devlink, "fib", n,
                                     NSIM_RESOURCE_IPV4_FIB,
-                                    NSIM_RESOURCE_IPV4,
-                                    &params, &nsim_ipv4_fib_res_ops);
+                                    NSIM_RESOURCE_IPV4, &params);
     if (err) {
         pr_err("Failed to register IPv4 FIB resource\n");
         return err;

@@ -110,8 +93,7 @@ static int devlink_resources_register(struct devlink *devlink)
     n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV4_FIB_RULES, true);
     err = devlink_resource_register(devlink, "fib-rules", n,
                                     NSIM_RESOURCE_IPV4_FIB_RULES,
-                                    NSIM_RESOURCE_IPV4,
-                                    &params, &nsim_ipv4_fib_rules_res_ops);
+                                    NSIM_RESOURCE_IPV4, &params);
     if (err) {
         pr_err("Failed to register IPv4 FIB rules resource\n");
         return err;

@@ -121,7 +103,7 @@ static int devlink_resources_register(struct devlink *devlink)
     err = devlink_resource_register(devlink, "IPv6", (u64)-1,
                                     NSIM_RESOURCE_IPV6,
                                     DEVLINK_RESOURCE_ID_PARENT_TOP,
-                                    &params, NULL);
+                                    &params);
     if (err) {
         pr_err("Failed to register IPv6 top resource\n");
         goto out;

@@ -130,8 +112,7 @@ static int devlink_resources_register(struct devlink *devlink)
     n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB, true);
     err = devlink_resource_register(devlink, "fib", n,
                                     NSIM_RESOURCE_IPV6_FIB,
-                                    NSIM_RESOURCE_IPV6,
-                                    &params, &nsim_ipv6_fib_res_ops);
+                                    NSIM_RESOURCE_IPV6, &params);
     if (err) {
         pr_err("Failed to register IPv6 FIB resource\n");
         return err;

@@ -140,12 +121,28 @@ static int devlink_resources_register(struct devlink *devlink)
     n = nsim_fib_get_val(net, NSIM_RESOURCE_IPV6_FIB_RULES, true);
     err = devlink_resource_register(devlink, "fib-rules", n,
                                     NSIM_RESOURCE_IPV6_FIB_RULES,
-                                    NSIM_RESOURCE_IPV6,
-                                    &params, &nsim_ipv6_fib_rules_res_ops);
+                                    NSIM_RESOURCE_IPV6, &params);
     if (err) {
         pr_err("Failed to register IPv6 FIB rules resource\n");
         return err;
     }
 
+    devlink_resource_occ_get_register(devlink,
+                                      NSIM_RESOURCE_IPV4_FIB,
+                                      nsim_ipv4_fib_resource_occ_get,
+                                      net);
+    devlink_resource_occ_get_register(devlink,
+                                      NSIM_RESOURCE_IPV4_FIB_RULES,
+                                      nsim_ipv4_fib_rules_res_occ_get,
+                                      net);
+    devlink_resource_occ_get_register(devlink,
+                                      NSIM_RESOURCE_IPV6_FIB,
+                                      nsim_ipv6_fib_resource_occ_get,
+                                      net);
+    devlink_resource_occ_get_register(devlink,
+                                      NSIM_RESOURCE_IPV6_FIB_RULES,
+                                      nsim_ipv6_fib_rules_res_occ_get,
+                                      net);
 out:
     return err;
 }
@@ -1207,6 +1207,23 @@ static void dp83640_remove(struct phy_device *phydev)
     kfree(dp83640);
 }
 
+static int dp83640_soft_reset(struct phy_device *phydev)
+{
+    int ret;
+
+    ret = genphy_soft_reset(phydev);
+    if (ret < 0)
+        return ret;
+
+    /* From DP83640 datasheet: "Software driver code must wait 3 us
+     * following a software reset before allowing further serial MII
+     * operations with the DP83640."
+     */
+    udelay(10); /* Taking udelay inaccuracy into account */
+
+    return 0;
+}
+
 static int dp83640_config_init(struct phy_device *phydev)
 {
     struct dp83640_private *dp83640 = phydev->priv;

@@ -1501,6 +1518,7 @@ static struct phy_driver dp83640_driver = {
     .flags = PHY_HAS_INTERRUPT,
     .probe = dp83640_probe,
     .remove = dp83640_remove,
+    .soft_reset = dp83640_soft_reset,
     .config_init = dp83640_config_init,
     .ack_interrupt = dp83640_ack_interrupt,
     .config_intr = dp83640_config_intr,
@@ -828,6 +828,22 @@ static int m88e1121_config_init(struct phy_device *phydev)
     return marvell_config_init(phydev);
 }
 
+static int m88e1318_config_init(struct phy_device *phydev)
+{
+    if (phy_interrupt_is_valid(phydev)) {
+        int err = phy_modify_paged(
+            phydev, MII_MARVELL_LED_PAGE,
+            MII_88E1318S_PHY_LED_TCR,
+            MII_88E1318S_PHY_LED_TCR_FORCE_INT,
+            MII_88E1318S_PHY_LED_TCR_INTn_ENABLE |
+            MII_88E1318S_PHY_LED_TCR_INT_ACTIVE_LOW);
+        if (err < 0)
+            return err;
+    }
+
+    return m88e1121_config_init(phydev);
+}
+
 static int m88e1510_config_init(struct phy_device *phydev)
 {
     int err;

@@ -870,7 +886,7 @@ static int m88e1510_config_init(struct phy_device *phydev)
         phydev->advertising &= ~pause;
     }
 
-    return m88e1121_config_init(phydev);
+    return m88e1318_config_init(phydev);
 }
 
 static int m88e1118_config_aneg(struct phy_device *phydev)

@@ -2086,7 +2102,7 @@ static struct phy_driver marvell_drivers[] = {
         .features = PHY_GBIT_FEATURES,
         .flags = PHY_HAS_INTERRUPT,
         .probe = marvell_probe,
-        .config_init = &m88e1121_config_init,
+        .config_init = &m88e1318_config_init,
         .config_aneg = &m88e1318_config_aneg,
         .read_status = &marvell_read_status,
         .ack_interrupt = &marvell_ack_interrupt,
@@ -44,6 +44,10 @@ MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
  * Using this limit prevents one virtqueue from starving others. */
 #define VHOST_NET_WEIGHT 0x80000
 
+/* Max number of packets transferred before requeueing the job.
+ * Using this limit prevents one virtqueue from starving rx. */
+#define VHOST_NET_PKT_WEIGHT(vq) ((vq)->num * 2)
+
 /* MAX number of TX used buffers for outstanding zerocopy */
 #define VHOST_MAX_PEND 128
 #define VHOST_GOODCOPY_LEN 256

@@ -473,6 +477,7 @@ static void handle_tx(struct vhost_net *net)
     struct socket *sock;
     struct vhost_net_ubuf_ref *uninitialized_var(ubufs);
     bool zcopy, zcopy_used;
+    int sent_pkts = 0;
 
     mutex_lock(&vq->mutex);
     sock = vq->private_data;

@@ -580,7 +585,8 @@ static void handle_tx(struct vhost_net *net)
         else
             vhost_zerocopy_signal_used(net, vq);
         vhost_net_tx_packet(net);
-        if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
+        if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
+            unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT(vq))) {
             vhost_poll_queue(&vq->poll);
             break;
         }
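The vhost-net change above adds a packet budget next to the existing byte budget. A hedged sketch of the bounded-work pattern it implements; have_tx_work() and send_one_packet() are hypothetical stand-ins for the real virtqueue handling:

    /* Two budgets bound one invocation of the tx handler: a stream of
     * large packets exhausts the byte budget, a stream of tiny ones
     * exhausts the packet budget. Either way the remaining work is
     * deferred via vhost_poll_queue() so rx and other virtqueues are
     * not starved.
     */
    while (have_tx_work(vq)) {
        total_len += send_one_packet(vq);
        if (unlikely(total_len >= VHOST_NET_WEIGHT) ||
            unlikely(++sent_pkts >= VHOST_NET_PKT_WEIGHT(vq))) {
            vhost_poll_queue(&vq->poll); /* resume on the next round */
            break;
        }
    }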
@@ -895,7 +895,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
                                      u16 conn_timeout);
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                                 u8 dst_type, u8 sec_level, u16 conn_timeout,
-                                u8 role);
+                                u8 role, bdaddr_t *direct_rpa);
 struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
                                  u8 sec_level, u8 auth_type);
 struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
@@ -231,14 +231,6 @@ struct devlink_dpipe_headers {
     unsigned int headers_count;
 };
 
-/**
- * struct devlink_resource_ops - resource ops
- * @occ_get: get the occupied size
- */
-struct devlink_resource_ops {
-    u64 (*occ_get)(struct devlink *devlink);
-};
-
 /**
  * struct devlink_resource_size_params - resource's size parameters
  * @size_min: minimum size which can be set

@@ -265,6 +257,8 @@ devlink_resource_size_params_init(struct devlink_resource_size_params *size_params,
     size_params->unit = unit;
 }
 
+typedef u64 devlink_resource_occ_get_t(void *priv);
+
 /**
  * struct devlink_resource - devlink resource
  * @name: name of the resource

@@ -277,7 +271,6 @@ devlink_resource_size_params_init(struct devlink_resource_size_params *size_params,
  * @size_params: size parameters
  * @list: parent list
  * @resource_list: list of child resources
- * @resource_ops: resource ops
  */
 struct devlink_resource {
     const char *name;

@@ -289,7 +282,8 @@ struct devlink_resource {
     struct devlink_resource_size_params size_params;
     struct list_head list;
     struct list_head resource_list;
-    const struct devlink_resource_ops *resource_ops;
+    devlink_resource_occ_get_t *occ_get;
+    void *occ_get_priv;
 };
 
 #define DEVLINK_RESOURCE_ID_PARENT_TOP 0

@@ -409,8 +403,7 @@ int devlink_resource_register(struct devlink *devlink,
                               u64 resource_size,
                               u64 resource_id,
                               u64 parent_resource_id,
-                              const struct devlink_resource_size_params *size_params,
-                              const struct devlink_resource_ops *resource_ops);
+                              const struct devlink_resource_size_params *size_params);
 void devlink_resources_unregister(struct devlink *devlink,
                                   struct devlink_resource *resource);
 int devlink_resource_size_get(struct devlink *devlink,

@@ -419,6 +412,12 @@ int devlink_resource_size_get(struct devlink *devlink,
 int devlink_dpipe_table_resource_set(struct devlink *devlink,
                                      const char *table_name, u64 resource_id,
                                      u64 resource_units);
+void devlink_resource_occ_get_register(struct devlink *devlink,
+                                       u64 resource_id,
+                                       devlink_resource_occ_get_t *occ_get,
+                                       void *occ_get_priv);
+void devlink_resource_occ_get_unregister(struct devlink *devlink,
+                                         u64 resource_id);
 
 #else

@@ -562,8 +561,7 @@ devlink_resource_register(struct devlink *devlink,
                           u64 resource_size,
                           u64 resource_id,
                           u64 parent_resource_id,
-                          const struct devlink_resource_size_params *size_params,
-                          const struct devlink_resource_ops *resource_ops)
+                          const struct devlink_resource_size_params *size_params)
 {
     return 0;
 }

@@ -589,6 +587,20 @@ devlink_dpipe_table_resource_set(struct devlink *devlink,
     return -EOPNOTSUPP;
 }
 
+static inline void
+devlink_resource_occ_get_register(struct devlink *devlink,
+                                  u64 resource_id,
+                                  devlink_resource_occ_get_t *occ_get,
+                                  void *occ_get_priv)
+{
+}
+
+static inline void
+devlink_resource_occ_get_unregister(struct devlink *devlink,
+                                    u64 resource_id)
+{
+}
+
 #endif
 
 #endif /* _NET_DEVLINK_H_ */
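With occ_get moved out of struct devlink_resource_ops, a driver now registers its occupancy callback separately, after devlink_resource_register() has succeeded. A hedged sketch of the new calling pattern; MY_RESOURCE_ID and the priv struct are illustrative, not part of the devlink API:

    static u64 my_occ_get(void *priv)
    {
        struct my_driver *drv = priv;

        return drv->entries_in_use; /* current occupancy, in resource units */
    }

    /* after devlink_resource_register(..., MY_RESOURCE_ID, ...) succeeds: */
    devlink_resource_occ_get_register(devlink, MY_RESOURCE_ID,
                                      my_occ_get, drv);

    /* and on teardown, before the resource goes away: */
    devlink_resource_occ_get_unregister(devlink, MY_RESOURCE_ID);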
@@ -43,6 +43,7 @@ struct inet_timewait_sock {
 #define tw_family        __tw_common.skc_family
 #define tw_state         __tw_common.skc_state
 #define tw_reuse         __tw_common.skc_reuse
+#define tw_reuseport     __tw_common.skc_reuseport
 #define tw_ipv6only      __tw_common.skc_ipv6only
 #define tw_bound_dev_if  __tw_common.skc_bound_dev_if
 #define tw_node          __tw_common.skc_nulls_node
@@ -7,7 +7,7 @@
 
 static inline int rtnh_ok(const struct rtnexthop *rtnh, int remaining)
 {
-    return remaining >= sizeof(*rtnh) &&
+    return remaining >= (int)sizeof(*rtnh) &&
            rtnh->rtnh_len >= sizeof(*rtnh) &&
            rtnh->rtnh_len <= remaining;
 }
@@ -182,8 +182,10 @@ static void bpf_tcp_release(struct sock *sk)
         psock->cork = NULL;
     }
 
-    sk->sk_prot = psock->sk_proto;
-    psock->sk_proto = NULL;
+    if (psock->sk_proto) {
+        sk->sk_prot = psock->sk_proto;
+        psock->sk_proto = NULL;
+    }
 out:
     rcu_read_unlock();
 }

@@ -211,6 +213,12 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
     close_fun = psock->save_close;
 
     write_lock_bh(&sk->sk_callback_lock);
+    if (psock->cork) {
+        free_start_sg(psock->sock, psock->cork);
+        kfree(psock->cork);
+        psock->cork = NULL;
+    }
+
     list_for_each_entry_safe(md, mtmp, &psock->ingress, list) {
         list_del(&md->list);
         free_start_sg(psock->sock, md);
@@ -1226,18 +1226,6 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
     }
 }
 
-static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
-                                             enum bpf_attach_type attach_type)
-{
-    switch (prog->type) {
-    case BPF_PROG_TYPE_CGROUP_SOCK:
-    case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
-        return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
-    default:
-        return 0;
-    }
-}
-
 /* last field in 'union bpf_attr' used by this command */
 #define BPF_PROG_LOAD_LAST_FIELD expected_attach_type

@@ -1465,6 +1453,18 @@ out_free_tp:
 
 #ifdef CONFIG_CGROUP_BPF
 
+static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
+                                             enum bpf_attach_type attach_type)
+{
+    switch (prog->type) {
+    case BPF_PROG_TYPE_CGROUP_SOCK:
+    case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+        return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
+    default:
+        return 0;
+    }
+}
+
 #define BPF_PROG_ATTACH_LAST_FIELD attach_flags
 
 static int sockmap_get_from_fd(const union bpf_attr *attr,
@@ -749,18 +749,31 @@ static bool conn_use_rpa(struct hci_conn *conn)
 }
 
 static void hci_req_add_le_create_conn(struct hci_request *req,
-                                       struct hci_conn *conn)
+                                       struct hci_conn *conn,
+                                       bdaddr_t *direct_rpa)
 {
     struct hci_cp_le_create_conn cp;
     struct hci_dev *hdev = conn->hdev;
     u8 own_addr_type;
 
-    /* Update random address, but set require_privacy to false so
-     * that we never connect with an non-resolvable address.
+    /* If direct address was provided we use it instead of current
+     * address.
      */
-    if (hci_update_random_address(req, false, conn_use_rpa(conn),
-                                  &own_addr_type))
-        return;
+    if (direct_rpa) {
+        if (bacmp(&req->hdev->random_addr, direct_rpa))
+            hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+                        direct_rpa);
+
+        /* direct address is always RPA */
+        own_addr_type = ADDR_LE_DEV_RANDOM;
+    } else {
+        /* Update random address, but set require_privacy to false so
+         * that we never connect with an non-resolvable address.
+         */
+        if (hci_update_random_address(req, false, conn_use_rpa(conn),
+                                      &own_addr_type))
+            return;
+    }
 
     memset(&cp, 0, sizeof(cp));

@@ -825,7 +838,7 @@ static void hci_req_directed_advertising(struct hci_request *req,
 
 struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                                 u8 dst_type, u8 sec_level, u16 conn_timeout,
-                                u8 role)
+                                u8 role, bdaddr_t *direct_rpa)
 {
     struct hci_conn_params *params;
     struct hci_conn *conn;

@@ -940,7 +953,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
         hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
     }
 
-    hci_req_add_le_create_conn(&req, conn);
+    hci_req_add_le_create_conn(&req, conn, direct_rpa);
 
 create_conn:
     err = hci_req_run(&req, create_le_conn_complete);
@@ -4648,7 +4648,8 @@ static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
 /* This function requires the caller holds hdev->lock */
 static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
                                               bdaddr_t *addr,
-                                              u8 addr_type, u8 adv_type)
+                                              u8 addr_type, u8 adv_type,
+                                              bdaddr_t *direct_rpa)
 {
     struct hci_conn *conn;
     struct hci_conn_params *params;

@@ -4699,7 +4700,8 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
     }
 
     conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
-                          HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
+                          HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER,
+                          direct_rpa);
     if (!IS_ERR(conn)) {
         /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned
          * by higher layer that tried to connect, if no then

@@ -4808,8 +4810,13 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
         bdaddr_type = irk->addr_type;
     }
 
-    /* Check if we have been requested to connect to this device */
-    conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
+    /* Check if we have been requested to connect to this device.
+     *
+     * direct_addr is set only for directed advertising reports (it is NULL
+     * for advertising reports) and is already verified to be RPA above.
+     */
+    conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type,
+                                 direct_addr);
     if (conn && type == LE_ADV_IND) {
         /* Store report for later inclusion by
          * mgmt_device_connected
@@ -7156,7 +7156,7 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
         hcon = hci_connect_le(hdev, dst, dst_type,
                               chan->sec_level,
                               HCI_LE_CONN_TIMEOUT,
-                              HCI_ROLE_SLAVE);
+                              HCI_ROLE_SLAVE, NULL);
     else
         hcon = hci_connect_le_scan(hdev, dst, dst_type,
                                    chan->sec_level,
@@ -1027,7 +1027,7 @@ bool dev_valid_name(const char *name)
 {
     if (*name == '\0')
         return false;
-    if (strlen(name) >= IFNAMSIZ)
+    if (strnlen(name, IFNAMSIZ) == IFNAMSIZ)
         return false;
     if (!strcmp(name, ".") || !strcmp(name, ".."))
         return false;
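strnlen() bounds the scan at IFNAMSIZ bytes, so a name that arrives from userspace without a NUL terminator is rejected rather than read past its buffer. A hedged illustration of the distinction:

    /* strlen(name) walks until it finds a NUL, wherever that may be;
     * strnlen(name, IFNAMSIZ) stops after IFNAMSIZ bytes. A return value
     * of IFNAMSIZ therefore covers both "too long" and "not terminated
     * within the buffer".
     */
    static bool name_fits(const char *name)
    {
        return strnlen(name, IFNAMSIZ) < IFNAMSIZ;
    }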
@@ -57,8 +57,8 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list,
         return -EINVAL;
 
     list_for_each_entry(ha, &list->list, list) {
-        if (!memcmp(ha->addr, addr, addr_len) &&
-            ha->type == addr_type) {
+        if (ha->type == addr_type &&
+            !memcmp(ha->addr, addr, addr_len)) {
             if (global) {
                 /* check if addr is already used as global */
                 if (ha->global_use)
@ -2405,6 +2405,16 @@ devlink_resource_size_params_put(struct devlink_resource *resource,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int devlink_resource_occ_put(struct devlink_resource *resource,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
if (!resource->occ_get)
|
||||
return 0;
|
||||
return nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
|
||||
resource->occ_get(resource->occ_get_priv),
|
||||
DEVLINK_ATTR_PAD);
|
||||
}
|
||||
|
||||
static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
|
||||
struct devlink_resource *resource)
|
||||
{
|
||||
|
@ -2425,11 +2435,8 @@ static int devlink_resource_put(struct devlink *devlink, struct sk_buff *skb,
|
|||
if (resource->size != resource->size_new)
|
||||
nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_SIZE_NEW,
|
||||
resource->size_new, DEVLINK_ATTR_PAD);
|
||||
if (resource->resource_ops && resource->resource_ops->occ_get)
|
||||
if (nla_put_u64_64bit(skb, DEVLINK_ATTR_RESOURCE_OCC,
|
||||
resource->resource_ops->occ_get(devlink),
|
||||
DEVLINK_ATTR_PAD))
|
||||
goto nla_put_failure;
|
||||
if (devlink_resource_occ_put(resource, skb))
|
||||
goto nla_put_failure;
|
||||
if (devlink_resource_size_params_put(resource, skb))
|
||||
goto nla_put_failure;
|
||||
if (list_empty(&resource->resource_list))
|
||||
|

@@ -3162,15 +3169,13 @@ EXPORT_SYMBOL_GPL(devlink_dpipe_table_unregister);
  * @resource_id: resource's id
  * @parent_reosurce_id: resource's parent id
  * @size params: size parameters
- * @resource_ops: resource ops
  */
 int devlink_resource_register(struct devlink *devlink,
 			      const char *resource_name,
 			      u64 resource_size,
 			      u64 resource_id,
 			      u64 parent_resource_id,
-			      const struct devlink_resource_size_params *size_params,
-			      const struct devlink_resource_ops *resource_ops)
+			      const struct devlink_resource_size_params *size_params)
 {
 	struct devlink_resource *resource;
 	struct list_head *resource_list;

@@ -3213,7 +3218,6 @@
 	resource->size = resource_size;
 	resource->size_new = resource_size;
 	resource->id = resource_id;
-	resource->resource_ops = resource_ops;
 	resource->size_valid = true;
 	memcpy(&resource->size_params, size_params,
 	       sizeof(resource->size_params));

@@ -3315,6 +3319,58 @@ out:
 }
 EXPORT_SYMBOL_GPL(devlink_dpipe_table_resource_set);
 
+/**
+ * devlink_resource_occ_get_register - register occupancy getter
+ *
+ * @devlink: devlink
+ * @resource_id: resource id
+ * @occ_get: occupancy getter callback
+ * @occ_get_priv: occupancy getter callback priv
+ */
+void devlink_resource_occ_get_register(struct devlink *devlink,
+				       u64 resource_id,
+				       devlink_resource_occ_get_t *occ_get,
+				       void *occ_get_priv)
+{
+	struct devlink_resource *resource;
+
+	mutex_lock(&devlink->lock);
+	resource = devlink_resource_find(devlink, NULL, resource_id);
+	if (WARN_ON(!resource))
+		goto out;
+	WARN_ON(resource->occ_get);
+
+	resource->occ_get = occ_get;
+	resource->occ_get_priv = occ_get_priv;
+out:
+	mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devlink_resource_occ_get_register);
+
+/**
+ * devlink_resource_occ_get_unregister - unregister occupancy getter
+ *
+ * @devlink: devlink
+ * @resource_id: resource id
+ */
+void devlink_resource_occ_get_unregister(struct devlink *devlink,
+					 u64 resource_id)
+{
+	struct devlink_resource *resource;
+
+	mutex_lock(&devlink->lock);
+	resource = devlink_resource_find(devlink, NULL, resource_id);
+	if (WARN_ON(!resource))
+		goto out;
+	WARN_ON(!resource->occ_get);
+
+	resource->occ_get = NULL;
+	resource->occ_get_priv = NULL;
+out:
+	mutex_unlock(&devlink->lock);
+}
+EXPORT_SYMBOL_GPL(devlink_resource_occ_get_unregister);
+
 static int __init devlink_module_init(void)
 {
 	return genl_register_family(&devlink_nl_family);
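
The three devlink hunks above replace the occ_get member of
devlink_resource_ops with a standalone getter that drivers register and
unregister at runtime, paired with an opaque priv pointer and protected by the
devlink lock; the netlink dump side then simply skips resources that have no
getter. A minimal userspace analog of that registration pattern (all names and
types here are invented for the sketch, not the devlink API):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t (*occ_get_t)(void *priv);

struct resource {
	uint64_t size;
	occ_get_t occ_get;		/* NULL until a driver registers one */
	void *occ_get_priv;
	pthread_mutex_t lock;
};

static void occ_get_register(struct resource *res, occ_get_t get, void *priv)
{
	pthread_mutex_lock(&res->lock);
	res->occ_get = get;
	res->occ_get_priv = priv;
	pthread_mutex_unlock(&res->lock);
}

static void occ_get_unregister(struct resource *res)
{
	pthread_mutex_lock(&res->lock);
	res->occ_get = NULL;
	res->occ_get_priv = NULL;
	pthread_mutex_unlock(&res->lock);
}

static uint64_t table_entries(void *priv)
{
	return *(uint64_t *)priv;	/* e.g. current entry count */
}

int main(void)
{
	uint64_t entries = 42;
	struct resource res = { .size = 100,
				.lock = PTHREAD_MUTEX_INITIALIZER };

	occ_get_register(&res, table_entries, &entries);
	if (res.occ_get)	/* mirrors devlink_resource_occ_put()'s check */
		printf("occupancy: %llu\n",
		       (unsigned long long)res.occ_get(res.occ_get_priv));
	occ_get_unregister(&res);
	return 0;
}

Decoupling the getter from registration time is what lets a driver create the
resource early but only start reporting occupancy once its tables actually
exist, which is the point of the "convert occ_get op to separate registration"
commit.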

@@ -857,6 +857,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
 	n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
 	n->cloned = 1;
 	n->nohdr = 0;
+	n->peeked = 0;
 	n->destructor = NULL;
 	C(tail);
 	C(end);

@@ -614,6 +614,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	ireq = inet_rsk(req);
 	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
 	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
+	ireq->ir_mark = inet_request_mark(sk, skb);
 	ireq->ireq_family = AF_INET;
 	ireq->ir_iif = sk->sk_bound_dev_if;
 

@@ -351,6 +351,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
 	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
 	ireq->ireq_family = AF_INET6;
+	ireq->ir_mark = inet_request_mark(sk, skb);
 
 	if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
 	    np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||

@@ -126,6 +126,7 @@ static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
 	struct dsa_port *cpu_dp = dev->dsa_ptr;
 	struct dsa_switch_tree *dst = cpu_dp->dst;
 	struct dsa_switch *ds;
+	struct dsa_port *slave_port;
 
 	if (device < 0 || device >= DSA_MAX_SWITCHES)
 		return NULL;

@@ -137,7 +138,12 @@
 	if (port < 0 || port >= ds->num_ports)
 		return NULL;
 
-	return ds->ports[port].slave;
+	slave_port = &ds->ports[port];
+
+	if (unlikely(slave_port->type != DSA_PORT_TYPE_USER))
+		return NULL;
+
+	return slave_port->slave;
 }
 
 /* port.c */

@@ -437,7 +437,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
 	/*unsigned long now; */
 	struct net *net = dev_net(dev);
 
-	rt = ip_route_output(net, sip, tip, 0, 0);
+	rt = ip_route_output(net, sip, tip, 0, l3mdev_master_ifindex_rcu(dev));
 	if (IS_ERR(rt))
 		return 1;
 	if (rt->dst.dev != dev) {

@@ -178,6 +178,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
 		tw->tw_dport = inet->inet_dport;
 		tw->tw_family = sk->sk_family;
 		tw->tw_reuse = sk->sk_reuse;
+		tw->tw_reuseport = sk->sk_reuseport;
 		tw->tw_hash = sk->sk_hash;
 		tw->tw_ipv6only = 0;
 		tw->tw_transparent = inet->transparent;

@@ -211,6 +211,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
 		p = kmem_cache_alloc(peer_cachep, GFP_ATOMIC);
 		if (p) {
 			p->daddr = *daddr;
+			p->dtime = (__u32)jiffies;
 			refcount_set(&p->refcnt, 2);
 			atomic_set(&p->rid, 0);
 			p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;

@@ -253,13 +253,14 @@ static struct net_device *__ip_tunnel_create(struct net *net,
 	struct net_device *dev;
 	char name[IFNAMSIZ];
 
-	if (parms->name[0])
-		strlcpy(name, parms->name, IFNAMSIZ);
-	else {
-		if (strlen(ops->kind) > (IFNAMSIZ - 3)) {
-			err = -E2BIG;
+	err = -E2BIG;
+	if (parms->name[0]) {
+		if (!dev_valid_name(parms->name))
+			goto failed;
+		strlcpy(name, parms->name, IFNAMSIZ);
+	} else {
+		if (strlen(ops->kind) > (IFNAMSIZ - 3))
 			goto failed;
-		}
 		strlcpy(name, ops->kind, IFNAMSIZ);
 		strncat(name, "%d", 2);
 	}

@@ -2296,13 +2296,14 @@ struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
 					const struct sk_buff *skb)
 {
 	__u8 tos = RT_FL_TOS(fl4);
-	struct fib_result res;
+	struct fib_result res = {
+		.type		= RTN_UNSPEC,
+		.fi		= NULL,
+		.table		= NULL,
+		.tclassid	= 0,
+	};
 	struct rtable *rth;
 
-	res.tclassid = 0;
-	res.fi = NULL;
-	res.table = NULL;
-
 	fl4->flowi4_iif = LOOPBACK_IFINDEX;
 	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
 	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
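
The route.c change works because of a C guarantee, not a convention: when a
struct has any initializer, every member that is not explicitly named is
zero-initialized, whereas a bare declaration leaves an automatic struct
entirely indeterminate, which is what the ip_route_output_key_hash_rcu()
uninit-value report was seeing. A small userspace demonstration (the struct is
an invented stand-in for struct fib_result):

#include <stdio.h>

struct fib_result_like {	/* invented stand-in */
	int type;
	void *fi;
	void *table;
	unsigned int tclassid;
	int prefixlen;		/* deliberately never named below */
};

int main(void)
{
	struct fib_result_like res = {
		.type     = 0,	/* RTN_UNSPEC is 0 in the kernel */
		.fi       = NULL,
		.table    = NULL,
		.tclassid = 0,
	};

	/* prefixlen was not listed, yet C guarantees it reads as 0 here */
	printf("prefixlen = %d\n", res.prefixlen);
	return 0;
}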

@@ -335,11 +335,13 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
 	if (t || !create)
 		return t;
 
-	if (parms->name[0])
+	if (parms->name[0]) {
+		if (!dev_valid_name(parms->name))
+			return NULL;
 		strlcpy(name, parms->name, IFNAMSIZ);
-	else
+	} else {
 		strcpy(name, "ip6gre%d");
-
+	}
 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
 			   ip6gre_tunnel_setup);
 	if (!dev)

@@ -375,6 +375,11 @@ static int ip6_forward_proxy_check(struct sk_buff *skb)
 static inline int ip6_forward_finish(struct net *net, struct sock *sk,
 				     struct sk_buff *skb)
 {
+	struct dst_entry *dst = skb_dst(skb);
+
+	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
+
 	return dst_output(net, sk, skb);
 }
 

@@ -569,8 +574,6 @@ int ip6_forward(struct sk_buff *skb)
 
 	hdr->hop_limit--;
 
-	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
-	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
 		       net, NULL, skb, skb->dev, dst->dev,
 		       ip6_forward_finish);

@@ -297,13 +297,16 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
 	struct net_device *dev;
 	struct ip6_tnl *t;
 	char name[IFNAMSIZ];
-	int err = -ENOMEM;
+	int err = -E2BIG;
 
-	if (p->name[0])
+	if (p->name[0]) {
+		if (!dev_valid_name(p->name))
+			goto failed;
 		strlcpy(name, p->name, IFNAMSIZ);
-	else
+	} else {
 		sprintf(name, "ip6tnl%%d");
-
+	}
+	err = -ENOMEM;
 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
 			   ip6_tnl_dev_setup);
 	if (!dev)

@@ -212,10 +212,13 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
 	char name[IFNAMSIZ];
 	int err;
 
-	if (p->name[0])
+	if (p->name[0]) {
+		if (!dev_valid_name(p->name))
+			goto failed;
 		strlcpy(name, p->name, IFNAMSIZ);
-	else
+	} else {
 		sprintf(name, "ip6_vti%%d");
+	}
 
 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
 	if (!dev)

@@ -250,11 +250,13 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
 	if (!create)
 		goto failed;
 
-	if (parms->name[0])
+	if (parms->name[0]) {
+		if (!dev_valid_name(parms->name))
+			goto failed;
 		strlcpy(name, parms->name, IFNAMSIZ);
-	else
+	} else {
 		strcpy(name, "sit%d");
-
+	}
 	dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
 			   ipip6_tunnel_setup);
 	if (!dev)

@@ -1844,6 +1844,8 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 
 	if (msg->msg_namelen) {
 		err = -EINVAL;
+		if (msg->msg_namelen < sizeof(struct sockaddr_nl))
+			goto out;
 		if (addr->nl_family != AF_NETLINK)
 			goto out;
 		dst_portid = addr->nl_pid;

@@ -248,10 +248,14 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
 
 static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
 {
-	if (cfg->is_ebpf)
-		bpf_prog_put(cfg->filter);
-	else
-		bpf_prog_destroy(cfg->filter);
+	struct bpf_prog *filter = cfg->filter;
+
+	if (filter) {
+		if (cfg->is_ebpf)
+			bpf_prog_put(filter);
+		else
+			bpf_prog_destroy(filter);
+	}
 
 	kfree(cfg->bpf_ops);
 	kfree(cfg->bpf_name);
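
The oops in tcf_bpf_init()'s error path came from running this cleanup on a
config whose filter pointer had never been assigned: the prog release calls do
not accept NULL, so a half-initialized config crashed. The fix makes the
cleanup tolerate that state. A userspace caricature of the pattern (types
invented; free() stands in for the release calls, with the difference that
free(NULL) is already defined):

#include <stdlib.h>

struct cfg {			/* invented stand-in for tcf_bpf_cfg */
	void *filter;		/* may still be NULL if init failed early */
	char *bpf_name;
};

static void cfg_cleanup(struct cfg *c)
{
	if (c->filter)		/* mirrors the new "if (filter)" guard */
		free(c->filter);
	free(c->bpf_name);	/* free(NULL) is a no-op, no guard needed */
}

int main(void)
{
	struct cfg half = { 0 };	/* error path: nothing allocated yet */

	cfg_cleanup(&half);		/* safe with the guard in place */
	return 0;
}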

@@ -489,6 +489,7 @@ static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode *key)
 			RCU_INIT_POINTER(*kp, key->next);
 
 			tcf_unbind_filter(tp, &key->res);
+			idr_remove(&ht->handle_idr, key->handle);
 			tcf_exts_get_net(&key->exts);
 			call_rcu(&key->rcu, u32_delete_key_freepf_rcu);
 			return 0;

@@ -757,8 +757,10 @@ static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
 		sctp_v6_map_v4(addr);
 	}
 
-	if (addr->sa.sa_family == AF_INET)
+	if (addr->sa.sa_family == AF_INET) {
+		memset(addr->v4.sin_zero, 0, sizeof(addr->v4.sin_zero));
 		return sizeof(struct sockaddr_in);
+	}
 	return sizeof(struct sockaddr_in6);
 }
 
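
The memset() above is the "sctp: do not leak kernel memory to user space"
fix: sockaddr_in is shorter than the sctp_addr union it lives in, and the
sin_zero padding is never written during normal address handling, so copying
the struct out verbatim disclosed stale kernel bytes. A userspace sketch of
the same hygiene (here the stale bytes merely come from the process's own
stack):

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct sockaddr_in sin;	/* deliberately not zeroed as a whole */

	sin.sin_family = AF_INET;
	sin.sin_port = htons(80);
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

	/* without this line, sin_zero still holds whatever was on the
	 * stack; with it, every byte handed out is defined */
	memset(sin.sin_zero, 0, sizeof(sin.sin_zero));

	for (size_t i = 0; i < sizeof(sin.sin_zero); i++)
		printf("%02x ", (unsigned char)sin.sin_zero[i]);
	printf("\n");
	return 0;
}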

@@ -357,11 +357,14 @@ static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt,
 	if (!opt->pf->af_supported(addr->sa.sa_family, opt))
 		return NULL;
 
-	/* V4 mapped address are really of AF_INET family */
-	if (addr->sa.sa_family == AF_INET6 &&
-	    ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
-	    !opt->pf->af_supported(AF_INET, opt))
-		return NULL;
+	if (addr->sa.sa_family == AF_INET6) {
+		if (len < SIN6_LEN_RFC2133)
+			return NULL;
+		/* V4 mapped address are really of AF_INET family */
+		if (ipv6_addr_v4mapped(&addr->v6.sin6_addr) &&
+		    !opt->pf->af_supported(AF_INET, opt))
+			return NULL;
+	}
 
 	/* If we get this far, af is valid. */
 	af = sctp_get_af_specific(addr->sa.sa_family);
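
This is the companion bounds fix ("sctp_sockaddr_af must check minimal addr
length for AF_INET6"): the caller's length was not guaranteed to cover the
IPv6 layout, so an AF_INET6 sockaddr shorter than SIN6_LEN_RFC2133 let
ipv6_addr_v4mapped() read sin6_addr bytes the caller never supplied. A
userspace sketch of the same validation (the helper is invented; the 24-byte
constant matches the kernel's SIN6_LEN_RFC2133):

#include <netinet/in.h>
#include <stdbool.h>
#include <string.h>
#include <sys/socket.h>

#define SIN6_LEN_RFC2133 24	/* RFC 2133 sockaddr_in6 layout */

static bool sockaddr_len_ok(const struct sockaddr *sa, socklen_t len)
{
	if (len < sizeof(sa->sa_family))
		return false;
	if (sa->sa_family == AF_INET6 && len < SIN6_LEN_RFC2133)
		return false;	/* too short to hold sin6_addr */
	if (sa->sa_family == AF_INET && len < sizeof(struct sockaddr_in))
		return false;
	return true;
}

int main(void)
{
	struct sockaddr_in6 sin6;

	memset(&sin6, 0, sizeof(sin6));
	sin6.sin6_family = AF_INET6;

	/* 8 bytes covers the family field but not sin6_addr: rejected */
	return sockaddr_len_ok((const struct sockaddr *)&sin6, 8) ? 1 : 0;
}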

@@ -59,7 +59,7 @@ static int __tipc_add_sock_diag(struct sk_buff *skb,
 	if (!nlh)
 		return -EMSGSIZE;
 
-	err = tipc_sk_fill_sock_diag(skb, tsk, req->tidiag_states,
+	err = tipc_sk_fill_sock_diag(skb, cb, tsk, req->tidiag_states,
 				     __tipc_diag_gen_cookie);
 	if (err)
 		return err;

@@ -3257,8 +3257,8 @@ out:
 }
 EXPORT_SYMBOL(tipc_nl_sk_walk);
 
-int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct tipc_sock *tsk,
-			   u32 sk_filter_state,
+int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
+			   struct tipc_sock *tsk, u32 sk_filter_state,
 			   u64 (*tipc_diag_gen_cookie)(struct sock *sk))
 {
 	struct sock *sk = &tsk->sk;

@@ -3280,7 +3280,7 @@ int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct tipc_sock *tsk,
 	    nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
 	    nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
 	    nla_put_u32(skb, TIPC_NLA_SOCK_UID,
-			from_kuid_munged(sk_user_ns(NETLINK_CB(skb).sk),
+			from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
 					 sock_i_uid(sk))) ||
 	    nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
 			      tipc_diag_gen_cookie(sk),

@@ -61,8 +61,8 @@ int tipc_sk_rht_init(struct net *net);
 void tipc_sk_rht_destroy(struct net *net);
 int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb);
 int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb);
-int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct tipc_sock *tsk,
-			   u32 sk_filter_state,
+int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
+			   struct tipc_sock *tsk, u32 sk_filter_state,
 			   u64 (*tipc_diag_gen_cookie)(struct sock *sk));
 int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
 		    int (*skb_handler)(struct sk_buff *skb,