Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

This commit is contained in:
David S. Miller 2017-02-16 19:34:01 -05:00
Parents f3caf8618b 4695daefba
Commit 3f64116a83
66 changed files with 595 additions and 291 deletions

View file

@ -2478,12 +2478,11 @@ S: D-90453 Nuernberg
S: Germany
N: Arnaldo Carvalho de Melo
E: acme@ghostprotocols.net
E: acme@kernel.org
E: arnaldo.melo@gmail.com
E: acme@redhat.com
W: http://oops.ghostprotocols.net:81/blog/
P: 1024D/9224DF01 D5DF E3BB E3C8 BCBB F8AD 841A B6AB 4681 9224 DF01
D: IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks
D: tools/, IPX, LLC, DCCP, cyc2x, wl3501_cs, net/ hacks
S: Brazil
N: Karsten Merker

View file

@ -211,7 +211,13 @@ Colorspace sRGB (V4L2_COLORSPACE_SRGB)
The :ref:`srgb` standard defines the colorspace used by most webcams
and computer graphics. The default transfer function is
``V4L2_XFER_FUNC_SRGB``. The default Y'CbCr encoding is
``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full range.
``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited range.
Note that the :ref:`sycc` standard specifies full range quantization;
however, all current capture hardware supported by the kernel converts
R'G'B' to limited range Y'CbCr. So choosing full range as the default
would break how applications interpret the quantization range.
The chromaticities of the primary colors and the white reference are:
@ -276,7 +282,7 @@ the following ``V4L2_YCBCR_ENC_601`` encoding as defined by :ref:`sycc`:
Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
[-0.5…0.5]. This transform is identical to one defined in SMPTE
170M/BT.601. The Y'CbCr quantization is full range.
170M/BT.601. The Y'CbCr quantization is limited range.
.. _col-adobergb:
@ -288,10 +294,15 @@ The :ref:`adobergb` standard defines the colorspace used by computer
graphics that use the AdobeRGB colorspace. This is also known as the
:ref:`oprgb` standard. The default transfer function is
``V4L2_XFER_FUNC_ADOBERGB``. The default Y'CbCr encoding is
``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is full
range. The chromaticities of the primary colors and the white reference
are:
``V4L2_YCBCR_ENC_601``. The default Y'CbCr quantization is limited
range.
Note that the :ref:`oprgb` standard specifies full range quantization;
however, all current capture hardware supported by the kernel converts
R'G'B' to limited range Y'CbCr. So choosing full range as the default
would break how applications interpret the quantization range.
The chromaticities of the primary colors and the white reference are:
.. tabularcolumns:: |p{4.4cm}|p{4.4cm}|p{8.7cm}|
@ -344,7 +355,7 @@ the following ``V4L2_YCBCR_ENC_601`` encoding:
Y' is clamped to the range [0…1] and Cb and Cr are clamped to the range
[-0.5…0.5]. This transform is identical to one defined in SMPTE
170M/BT.601. The Y'CbCr quantization is full range.
170M/BT.601. The Y'CbCr quantization is limited range.
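For illustration (not part of the patch): a minimal userspace sketch of how an application would resolve the default quantization for an sRGB Y'CbCr capture, using the V4L2_MAP_QUANTIZATION_DEFAULT macro that is updated later in this same commit. With this change the result is limited range.

    #include <stdio.h>
    #include <linux/videodev2.h>

    int main(void)
    {
        /* Y'CbCr data (not RGB/HSV) tagged with the sRGB colorspace */
        enum v4l2_quantization q = V4L2_MAP_QUANTIZATION_DEFAULT(
                0 /* is_rgb_or_hsv */,
                V4L2_COLORSPACE_SRGB,
                V4L2_YCBCR_ENC_601);

        printf("default quantization: %s\n",
               q == V4L2_QUANTIZATION_FULL_RANGE ? "full" : "limited");
        return 0;
    }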
.. _col-bt2020:

View file

@ -877,8 +877,8 @@ S: Odd fixes
F: drivers/hwmon/applesmc.c
APPLETALK NETWORK LAYER
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
S: Maintained
L: netdev@vger.kernel.org
S: Odd fixes
F: drivers/net/appletalk/
F: net/appletalk/
@ -6748,9 +6748,8 @@ S: Odd Fixes
F: drivers/tty/ipwireless/
IPX NETWORK LAYER
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
L: netdev@vger.kernel.org
S: Maintained
S: Odd fixes
F: include/net/ipx.h
F: include/uapi/linux/ipx.h
F: net/ipx/
@ -7522,8 +7521,8 @@ S: Maintained
F: drivers/misc/lkdtm*
LLC (802.2)
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
S: Maintained
L: netdev@vger.kernel.org
S: Odd fixes
F: include/linux/llc.h
F: include/uapi/linux/llc.h
F: include/net/llc*
@ -13416,10 +13415,8 @@ S: Maintained
F: drivers/input/misc/wistron_btns.c
WL3501 WIRELESS PCMCIA CARD DRIVER
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
L: linux-wireless@vger.kernel.org
W: http://oops.ghostprotocols.net:81/blog
S: Maintained
S: Odd fixes
F: drivers/net/wireless/wl3501*
WOLFSON MICROELECTRONICS DRIVERS

View file

@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Fearless Coyote
# *DOCUMENTATION*

View file

@ -104,6 +104,7 @@ struct cpuinfo_x86 {
__u8 x86_phys_bits;
/* CPUID returned core id bits: */
__u8 x86_coreid_bits;
__u8 cu_id;
/* Max extended CPUID function supported: */
__u32 extended_cpuid_level;
/* Maximum supported CPUID level, -1=no CPUID: */

View file

@ -309,8 +309,22 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
/* get information required for multi-node processors */
if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
u32 eax, ebx, ecx, edx;
node_id = cpuid_ecx(0x8000001e) & 7;
cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
node_id = ecx & 0xff;
smp_num_siblings = ((ebx >> 8) & 0xff) + 1;
if (c->x86 == 0x15)
c->cu_id = ebx & 0xff;
if (c->x86 >= 0x17) {
c->cpu_core_id = ebx & 0xff;
if (smp_num_siblings > 1)
c->x86_max_cores /= smp_num_siblings;
}
/*
* We may have multiple LLCs if L3 caches exist, so check if we
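As a hedged aside: the field layout read above can be checked from userspace on a TOPOEXT-capable AMD CPU with a sketch like the following; the field meanings are taken from the hunk above, and GCC's <cpuid.h> helper is assumed to be available.

    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        /* CPUID leaf 0x8000001e, as read by amd_get_topology() above */
        if (!__get_cpuid_count(0x8000001e, 0, &eax, &ebx, &ecx, &edx))
            return 1; /* leaf not available */

        printf("node_id=%u threads_per_core=%u unit_or_core_id=%u\n",
               ecx & 0xff,              /* node_id = ecx & 0xff */
               ((ebx >> 8) & 0xff) + 1, /* smp_num_siblings */
               ebx & 0xff);             /* cu_id (fam 15h) / core id (fam 17h+) */
        return 0;
    }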

View file

@ -1015,6 +1015,7 @@ static void identify_cpu(struct cpuinfo_x86 *c)
c->x86_model_id[0] = '\0'; /* Unset */
c->x86_max_cores = 1;
c->x86_coreid_bits = 0;
c->cu_id = 0xff;
#ifdef CONFIG_X86_64
c->x86_clflush_size = 64;
c->x86_phys_bits = 36;

View file

@ -433,10 +433,16 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
if (c->phys_proc_id == o->phys_proc_id &&
per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
c->cpu_core_id == o->cpu_core_id)
per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) {
if (c->cpu_core_id == o->cpu_core_id)
return topology_sane(c, o, "smt");
if ((c->cu_id != 0xff) &&
(o->cu_id != 0xff) &&
(c->cu_id == o->cu_id))
return topology_sane(c, o, "smt");
}
} else if (c->phys_proc_id == o->phys_proc_id &&
c->cpu_core_id == o->cpu_core_id) {
return topology_sane(c, o, "smt");

View file

@ -1356,6 +1356,9 @@ void __init tsc_init(void)
(unsigned long)cpu_khz / 1000,
(unsigned long)cpu_khz % 1000);
/* Sanitize TSC ADJUST before cyc2ns gets initialized */
tsc_store_and_check_tsc_adjust(true);
/*
* Secondary CPUs do not run through tsc_init(), so set up
* all the scale factors for all CPUs, assuming the same
@ -1386,8 +1389,6 @@ void __init tsc_init(void)
if (unsynchronized_tsc())
mark_tsc_unstable("TSCs unsynchronized");
else
tsc_store_and_check_tsc_adjust(true);
check_system_tsc_reliable();

View file

@ -286,13 +286,6 @@ void check_tsc_sync_source(int cpu)
if (unsynchronized_tsc())
return;
if (tsc_clocksource_reliable) {
if (cpu == (nr_cpu_ids-1) || system_state != SYSTEM_BOOTING)
pr_info(
"Skipped synchronization checks as TSC is reliable.\n");
return;
}
/*
* Set the maximum number of test runs to
* 1 if the CPU does not provide the TSC_ADJUST MSR
@ -380,14 +373,19 @@ void check_tsc_sync_target(void)
int cpus = 2;
/* Also aborts if there is no TSC. */
if (unsynchronized_tsc() || tsc_clocksource_reliable)
if (unsynchronized_tsc())
return;
/*
* Store, verify and sanitize the TSC adjust register. If
* successful skip the test.
*
* The test is also skipped when the TSC is marked reliable. This
* is true for SoCs which have no fallback clocksource. On these
* SoCs the TSC is frequency synchronized, but still the TSC ADJUST
* register might have been wrecked by the BIOS.
*/
if (tsc_store_and_check_tsc_adjust(false)) {
if (tsc_store_and_check_tsc_adjust(false) || tsc_clocksource_reliable) {
atomic_inc(&skip_test);
return;
}

View file

@ -15,6 +15,7 @@
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <asm/pgtable.h>
@ -406,6 +407,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
} else
note_page(m, &st, __pgprot(0), 1);
cond_resched();
start++;
}

View file

@ -19,9 +19,9 @@
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/mfd/syscon.h>
@ -39,6 +39,7 @@ struct keystone_irq_device {
struct irq_domain *irqd;
struct regmap *devctrl_regs;
u32 devctrl_offset;
raw_spinlock_t wa_lock;
};
static inline u32 keystone_irq_readl(struct keystone_irq_device *kirq)
@ -83,17 +84,15 @@ static void keystone_irq_ack(struct irq_data *d)
/* nothing to do here */
}
static void keystone_irq_handler(struct irq_desc *desc)
static irqreturn_t keystone_irq_handler(int irq, void *keystone_irq)
{
unsigned int irq = irq_desc_get_irq(desc);
struct keystone_irq_device *kirq = irq_desc_get_handler_data(desc);
struct keystone_irq_device *kirq = keystone_irq;
unsigned long wa_lock_flags;
unsigned long pending;
int src, virq;
dev_dbg(kirq->dev, "start irq %d\n", irq);
chained_irq_enter(irq_desc_get_chip(desc), desc);
pending = keystone_irq_readl(kirq);
keystone_irq_writel(kirq, pending);
@ -111,13 +110,15 @@ static void keystone_irq_handler(struct irq_desc *desc)
if (!virq)
dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n",
src, virq);
raw_spin_lock_irqsave(&kirq->wa_lock, wa_lock_flags);
generic_handle_irq(virq);
raw_spin_unlock_irqrestore(&kirq->wa_lock,
wa_lock_flags);
}
}
chained_irq_exit(irq_desc_get_chip(desc), desc);
dev_dbg(kirq->dev, "end irq %d\n", irq);
return IRQ_HANDLED;
}
static int keystone_irq_map(struct irq_domain *h, unsigned int virq,
@ -182,9 +183,16 @@ static int keystone_irq_probe(struct platform_device *pdev)
return -ENODEV;
}
raw_spin_lock_init(&kirq->wa_lock);
platform_set_drvdata(pdev, kirq);
irq_set_chained_handler_and_data(kirq->irq, keystone_irq_handler, kirq);
ret = request_irq(kirq->irq, keystone_irq_handler,
0, dev_name(dev), kirq);
if (ret) {
irq_domain_remove(kirq->irqd);
return ret;
}
/* clear all source bits */
keystone_irq_writel(kirq, ~0x0);
@ -199,6 +207,8 @@ static int keystone_irq_remove(struct platform_device *pdev)
struct keystone_irq_device *kirq = platform_get_drvdata(pdev);
int hwirq;
free_irq(kirq->irq, kirq);
for (hwirq = 0; hwirq < KEYSTONE_N_IRQ; hwirq++)
irq_dispose_mapping(irq_find_mapping(kirq->irqd, hwirq));

View file

@ -131,12 +131,16 @@ static struct irq_chip mxs_icoll_chip = {
.irq_ack = icoll_ack_irq,
.irq_mask = icoll_mask_irq,
.irq_unmask = icoll_unmask_irq,
.flags = IRQCHIP_MASK_ON_SUSPEND |
IRQCHIP_SKIP_SET_WAKE,
};
static struct irq_chip asm9260_icoll_chip = {
.irq_ack = icoll_ack_irq,
.irq_mask = asm9260_mask_irq,
.irq_unmask = asm9260_unmask_irq,
.flags = IRQCHIP_MASK_ON_SUSPEND |
IRQCHIP_SKIP_SET_WAKE,
};
asmlinkage void __exception_irq_entry icoll_handle_irq(struct pt_regs *regs)

View file

@ -612,8 +612,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
}
memset(msg->msg + msg->len, 0, sizeof(msg->msg) - msg->len);
if (msg->len == 1) {
if (cec_msg_initiator(msg) != 0xf ||
cec_msg_destination(msg) == 0xf) {
if (cec_msg_destination(msg) == 0xf) {
dprintk(1, "cec_transmit_msg: invalid poll message\n");
return -EINVAL;
}
@ -638,7 +637,7 @@ int cec_transmit_msg_fh(struct cec_adapter *adap, struct cec_msg *msg,
dprintk(1, "cec_transmit_msg: destination is the adapter itself\n");
return -EINVAL;
}
if (cec_msg_initiator(msg) != 0xf &&
if (msg->len > 1 && adap->is_configured &&
!cec_has_log_addr(adap, cec_msg_initiator(msg))) {
dprintk(1, "cec_transmit_msg: initiator has unknown logical address %d\n",
cec_msg_initiator(msg));
@ -1072,7 +1071,7 @@ static int cec_config_log_addr(struct cec_adapter *adap,
/* Send poll message */
msg.len = 1;
msg.msg[0] = 0xf0 | log_addr;
msg.msg[0] = (log_addr << 4) | log_addr;
err = cec_transmit_msg_fh(adap, &msg, NULL, true);
/*
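A worked example of the poll byte above (illustrative only): a CEC poll is a single header byte with the initiator in the high nibble and the destination in the low nibble, so a device claiming logical address 4 must send 0x44 rather than the broadcast-initiator form 0xf4.

    #include <stdio.h>

    int main(void)
    {
        unsigned int log_addr = 4; /* logical address being claimed */
        unsigned char poll = (log_addr << 4) | log_addr;

        printf("poll header for address %u: 0x%02x\n", log_addr, poll);
        return 0;
    }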

View file

@ -218,22 +218,30 @@ static int smsusb_start_streaming(struct smsusb_device_t *dev)
static int smsusb_sendrequest(void *context, void *buffer, size_t size)
{
struct smsusb_device_t *dev = (struct smsusb_device_t *) context;
struct sms_msg_hdr *phdr = (struct sms_msg_hdr *) buffer;
int dummy;
struct sms_msg_hdr *phdr;
int dummy, ret;
if (dev->state != SMSUSB_ACTIVE) {
pr_debug("Device not active yet\n");
return -ENOENT;
}
phdr = kmalloc(size, GFP_KERNEL);
if (!phdr)
return -ENOMEM;
memcpy(phdr, buffer, size);
pr_debug("sending %s(%d) size: %d\n",
smscore_translate_msg(phdr->msg_type), phdr->msg_type,
phdr->msg_length);
smsendian_handle_tx_message((struct sms_msg_data *) phdr);
smsendian_handle_message_header((struct sms_msg_hdr *)buffer);
return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
buffer, size, &dummy, 1000);
smsendian_handle_message_header((struct sms_msg_hdr *)phdr);
ret = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2),
phdr, size, &dummy, 1000);
kfree(phdr);
return ret;
}
static char *smsusb1_fw_lkup[] = {

View file

@ -2910,6 +2910,7 @@ static void set_multicast_list(struct net_device *ndev)
struct netdev_hw_addr *ha;
unsigned int i, bit, data, crc, tmp;
unsigned char hash;
unsigned int hash_high = 0, hash_low = 0;
if (ndev->flags & IFF_PROMISC) {
tmp = readl(fep->hwp + FEC_R_CNTRL);
@ -2932,11 +2933,7 @@ static void set_multicast_list(struct net_device *ndev)
return;
}
/* Clear filter and add the addresses in hash register
*/
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
/* Add the addresses in hash register */
netdev_for_each_mc_addr(ha, ndev) {
/* calculate crc32 value of mac address */
crc = 0xffffffff;
@ -2954,16 +2951,14 @@ static void set_multicast_list(struct net_device *ndev)
*/
hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
if (hash > 31) {
tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
tmp |= 1 << (hash - 32);
writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
} else {
tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
tmp |= 1 << hash;
writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
}
if (hash > 31)
hash_high |= 1 << (hash - 32);
else
hash_low |= 1 << hash;
}
writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
}
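A standalone sketch of the bit placement above, assuming FEC_HASH_BITS is 6 (a 64-bit hash table split across two 32-bit registers); the CRC loop itself is elided in the hunk.

    #include <stdio.h>
    #include <stdint.h>

    #define FEC_HASH_BITS 6 /* assumption: matches the driver */

    int main(void)
    {
        uint32_t crc = 0x9ae64c77; /* example CRC-32 of one MAC address */
        uint32_t hash_high = 0, hash_low = 0;
        unsigned int hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;

        if (hash > 31)
            hash_high |= 1u << (hash - 32); /* top 6 bits = 38 -> high bit 6 */
        else
            hash_low |= 1u << hash;

        printf("hash=%u high=0x%08x low=0x%08x\n", hash, hash_high, hash_low);
        return 0;
    }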
/* Set a MAC change in hardware. */

View file

@ -189,9 +189,10 @@ static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
}
ltb->map_id = adapter->map_id;
adapter->map_id++;
init_completion(&adapter->fw_done);
send_request_map(adapter, ltb->addr,
ltb->size, ltb->map_id);
init_completion(&adapter->fw_done);
wait_for_completion(&adapter->fw_done);
return 0;
}
@ -505,7 +506,7 @@ rx_pool_alloc_failed:
adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
for (i = 0; i < adapter->req_rx_queues; i++)
napi_enable(&adapter->napi[i]);
napi_disable(&adapter->napi[i]);
alloc_napi_failed:
return -ENOMEM;
}
@ -1126,10 +1127,10 @@ static void ibmvnic_get_ethtool_stats(struct net_device *dev,
crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
crq.request_statistics.len =
cpu_to_be32(sizeof(struct ibmvnic_statistics));
ibmvnic_send_crq(adapter, &crq);
/* Wait for data to be written */
init_completion(&adapter->stats_done);
ibmvnic_send_crq(adapter, &crq);
wait_for_completion(&adapter->stats_done);
for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
@ -1501,7 +1502,7 @@ static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
adapter->req_rx_queues = adapter->opt_rx_comp_queues;
adapter->req_rx_add_queues = adapter->max_rx_add_queues;
adapter->req_mtu = adapter->max_mtu;
adapter->req_mtu = adapter->netdev->mtu + ETH_HLEN;
}
total_queues = adapter->req_tx_queues + adapter->req_rx_queues;
@ -2190,12 +2191,12 @@ static void handle_error_info_rsp(union ibmvnic_crq *crq,
if (!found) {
dev_err(dev, "Couldn't find error id %x\n",
crq->request_error_rsp.error_id);
be32_to_cpu(crq->request_error_rsp.error_id));
return;
}
dev_err(dev, "Detailed info for error id %x:",
crq->request_error_rsp.error_id);
be32_to_cpu(crq->request_error_rsp.error_id));
for (i = 0; i < error_buff->len; i++) {
pr_cont("%02x", (int)error_buff->buff[i]);
@ -2274,8 +2275,8 @@ static void handle_error_indication(union ibmvnic_crq *crq,
dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
crq->error_indication.
flags & IBMVNIC_FATAL_ERROR ? "FATAL " : "",
crq->error_indication.error_id,
crq->error_indication.error_cause);
be32_to_cpu(crq->error_indication.error_id),
be16_to_cpu(crq->error_indication.error_cause));
error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
if (!error_buff)
@ -2393,10 +2394,10 @@ static void handle_request_cap_rsp(union ibmvnic_crq *crq,
case PARTIALSUCCESS:
dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
*req_value,
(long int)be32_to_cpu(crq->request_capability_rsp.
(long int)be64_to_cpu(crq->request_capability_rsp.
number), name);
release_sub_crqs_no_irqs(adapter);
*req_value = be32_to_cpu(crq->request_capability_rsp.number);
*req_value = be64_to_cpu(crq->request_capability_rsp.number);
init_sub_crqs(adapter, 1);
return;
default:
@ -2631,12 +2632,12 @@ static void handle_query_cap_rsp(union ibmvnic_crq *crq,
break;
case MIN_MTU:
adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
netdev->min_mtu = adapter->min_mtu;
netdev->min_mtu = adapter->min_mtu - ETH_HLEN;
netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
break;
case MAX_MTU:
adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
netdev->max_mtu = adapter->max_mtu;
netdev->max_mtu = adapter->max_mtu - ETH_HLEN;
netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
break;
case MAX_MULTICAST_FILTERS:
@ -2804,9 +2805,9 @@ static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
ibmvnic_send_crq(adapter, &crq);
init_completion(&adapter->fw_done);
ibmvnic_send_crq(adapter, &crq);
wait_for_completion(&adapter->fw_done);
if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
@ -3586,9 +3587,9 @@ static int ibmvnic_dump_show(struct seq_file *seq, void *v)
memset(&crq, 0, sizeof(crq));
crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
ibmvnic_send_crq(adapter, &crq);
init_completion(&adapter->fw_done);
ibmvnic_send_crq(adapter, &crq);
wait_for_completion(&adapter->fw_done);
seq_write(seq, adapter->dump_data, adapter->dump_data_size);
@ -3634,8 +3635,8 @@ static void handle_crq_init_rsp(struct work_struct *work)
}
}
send_version_xchg(adapter);
reinit_completion(&adapter->init_done);
send_version_xchg(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
dev_err(dev, "Passive init timeout\n");
goto task_failed;
@ -3645,9 +3646,9 @@ static void handle_crq_init_rsp(struct work_struct *work)
if (adapter->renegotiate) {
adapter->renegotiate = false;
release_sub_crqs_no_irqs(adapter);
send_cap_queries(adapter);
reinit_completion(&adapter->init_done);
send_cap_queries(adapter);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout)) {
dev_err(dev, "Passive init timeout\n");
@ -3661,9 +3662,7 @@ static void handle_crq_init_rsp(struct work_struct *work)
goto task_failed;
netdev->real_num_tx_queues = adapter->req_tx_queues;
netdev->mtu = adapter->req_mtu;
netdev->min_mtu = adapter->min_mtu;
netdev->max_mtu = adapter->max_mtu;
netdev->mtu = adapter->req_mtu - ETH_HLEN;
if (adapter->failover) {
adapter->failover = false;
@ -3777,9 +3776,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
adapter->debugfs_dump = ent;
}
}
ibmvnic_send_crq_init(adapter);
init_completion(&adapter->init_done);
ibmvnic_send_crq_init(adapter);
if (!wait_for_completion_timeout(&adapter->init_done, timeout))
return 0;
@ -3787,9 +3786,9 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
if (adapter->renegotiate) {
adapter->renegotiate = false;
release_sub_crqs_no_irqs(adapter);
send_cap_queries(adapter);
reinit_completion(&adapter->init_done);
send_cap_queries(adapter);
if (!wait_for_completion_timeout(&adapter->init_done,
timeout))
return 0;
@ -3803,7 +3802,7 @@ static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
}
netdev->real_num_tx_queues = adapter->req_tx_queues;
netdev->mtu = adapter->req_mtu;
netdev->mtu = adapter->req_mtu - ETH_HLEN;
rc = register_netdev(netdev);
if (rc) {
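The recurring fix in this file is an ordering rule. A kernel-context sketch of the safe shape (not a standalone program, reusing the identifiers from the hunks above):

    /* Arm the completion before sending the request that completes it;
     * otherwise the response IRQ can race with init_completion() and
     * the wakeup is either missed or wiped out by the re-init.
     */
    init_completion(&adapter->fw_done);
    ibmvnic_send_crq(adapter, &crq);
    wait_for_completion(&adapter->fw_done);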

View file

@ -1240,10 +1240,14 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
preempt_disable();
tcf_exts_to_list(f->exts, &actions);
list_for_each_entry(a, &actions, list)
tcf_action_stats_update(a, bytes, packets, lastuse);
preempt_enable();
return 0;
}

View file

@ -3207,7 +3207,7 @@ static int cpsw_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct net_device *ndev = platform_get_drvdata(pdev);
struct cpsw_common *cpsw = netdev_priv(ndev);
struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
/* Select default pin state */
pinctrl_pm_select_default_state(dev);

View file

@ -100,6 +100,14 @@
/* BUFFER_ALIGN(adr) calculates the number of bytes to the next alignment. */
#define BUFFER_ALIGN(adr) ((ALIGNMENT - ((u32) adr)) % ALIGNMENT)
#ifdef __BIG_ENDIAN
#define xemaclite_readl ioread32be
#define xemaclite_writel iowrite32be
#else
#define xemaclite_readl ioread32
#define xemaclite_writel iowrite32
#endif
/**
* struct net_local - Our private per device data
* @ndev: instance of the network device
@ -156,15 +164,15 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
u32 reg_data;
/* Enable the Tx interrupts for the first Buffer */
reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
__raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
xemaclite_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
drvdata->base_addr + XEL_TSR_OFFSET);
/* Enable the Rx interrupts for the first buffer */
__raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
xemaclite_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
/* Enable the Global Interrupt Enable */
__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
}
/**
@ -179,16 +187,16 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
u32 reg_data;
/* Disable the Global Interrupt Enable */
__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
xemaclite_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
/* Disable the Tx interrupts for the first buffer */
reg_data = __raw_readl(drvdata->base_addr + XEL_TSR_OFFSET);
__raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
reg_data = xemaclite_readl(drvdata->base_addr + XEL_TSR_OFFSET);
xemaclite_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
drvdata->base_addr + XEL_TSR_OFFSET);
/* Disable the Rx interrupts for the first buffer */
reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
__raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
reg_data = xemaclite_readl(drvdata->base_addr + XEL_RSR_OFFSET);
xemaclite_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
drvdata->base_addr + XEL_RSR_OFFSET);
}
@ -321,7 +329,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
byte_count = ETH_FRAME_LEN;
/* Check if the expected buffer is available */
reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) == 0) {
@ -334,7 +342,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
addr = (void __iomem __force *)((u32 __force)addr ^
XEL_BUFFER_OFFSET);
reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
if ((reg_data & (XEL_TSR_XMIT_BUSY_MASK |
XEL_TSR_XMIT_ACTIVE_MASK)) != 0)
@ -345,16 +353,16 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
/* Write the frame to the buffer */
xemaclite_aligned_write(data, (u32 __force *) addr, byte_count);
__raw_writel((byte_count & XEL_TPLR_LENGTH_MASK),
xemaclite_writel((byte_count & XEL_TPLR_LENGTH_MASK),
addr + XEL_TPLR_OFFSET);
/* Update the Tx Status Register to indicate that there is a
* frame to send. Set the XEL_TSR_XMIT_ACTIVE_MASK flag which
* is used by the interrupt handler to check whether a frame
* has been transmitted */
reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
reg_data |= (XEL_TSR_XMIT_BUSY_MASK | XEL_TSR_XMIT_ACTIVE_MASK);
__raw_writel(reg_data, addr + XEL_TSR_OFFSET);
xemaclite_writel(reg_data, addr + XEL_TSR_OFFSET);
return 0;
}
@ -369,7 +377,7 @@ static int xemaclite_send_data(struct net_local *drvdata, u8 *data,
*
* Return: Total number of bytes received
*/
static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data, int maxlen)
{
void __iomem *addr;
u16 length, proto_type;
@ -379,7 +387,7 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
addr = (drvdata->base_addr + drvdata->next_rx_buf_to_use);
/* Verify which buffer has valid data */
reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
if ((reg_data & XEL_RSR_RECV_DONE_MASK) == XEL_RSR_RECV_DONE_MASK) {
if (drvdata->rx_ping_pong != 0)
@ -396,27 +404,28 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
return 0; /* No data was available */
/* Verify that buffer has valid data */
reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
if ((reg_data & XEL_RSR_RECV_DONE_MASK) !=
XEL_RSR_RECV_DONE_MASK)
return 0; /* No data was available */
}
/* Get the protocol type of the ethernet frame that arrived */
proto_type = ((ntohl(__raw_readl(addr + XEL_HEADER_OFFSET +
proto_type = ((ntohl(xemaclite_readl(addr + XEL_HEADER_OFFSET +
XEL_RXBUFF_OFFSET)) >> XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
/* Check if received ethernet frame is a raw ethernet frame
* or an IP packet or an ARP packet */
if (proto_type > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
if (proto_type > ETH_DATA_LEN) {
if (proto_type == ETH_P_IP) {
length = ((ntohl(__raw_readl(addr +
length = ((ntohl(xemaclite_readl(addr +
XEL_HEADER_IP_LENGTH_OFFSET +
XEL_RXBUFF_OFFSET)) >>
XEL_HEADER_SHIFT) &
XEL_RPLR_LENGTH_MASK);
length = min_t(u16, length, ETH_DATA_LEN);
length += ETH_HLEN + ETH_FCS_LEN;
} else if (proto_type == ETH_P_ARP)
@ -429,14 +438,17 @@ static u16 xemaclite_recv_data(struct net_local *drvdata, u8 *data)
/* Use the length in the frame, plus the header and trailer */
length = proto_type + ETH_HLEN + ETH_FCS_LEN;
if (WARN_ON(length > maxlen))
length = maxlen;
/* Read from the EmacLite device */
xemaclite_aligned_read((u32 __force *) (addr + XEL_RXBUFF_OFFSET),
data, length);
/* Acknowledge the frame */
reg_data = __raw_readl(addr + XEL_RSR_OFFSET);
reg_data = xemaclite_readl(addr + XEL_RSR_OFFSET);
reg_data &= ~XEL_RSR_RECV_DONE_MASK;
__raw_writel(reg_data, addr + XEL_RSR_OFFSET);
xemaclite_writel(reg_data, addr + XEL_RSR_OFFSET);
return length;
}
@ -463,14 +475,14 @@ static void xemaclite_update_address(struct net_local *drvdata,
xemaclite_aligned_write(address_ptr, (u32 __force *) addr, ETH_ALEN);
__raw_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
xemaclite_writel(ETH_ALEN, addr + XEL_TPLR_OFFSET);
/* Update the MAC address in the EmacLite */
reg_data = __raw_readl(addr + XEL_TSR_OFFSET);
__raw_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
reg_data = xemaclite_readl(addr + XEL_TSR_OFFSET);
xemaclite_writel(reg_data | XEL_TSR_PROG_MAC_ADDR, addr + XEL_TSR_OFFSET);
/* Wait for EmacLite to finish with the MAC address update */
while ((__raw_readl(addr + XEL_TSR_OFFSET) &
while ((xemaclite_readl(addr + XEL_TSR_OFFSET) &
XEL_TSR_PROG_MAC_ADDR) != 0)
;
}
@ -603,7 +615,7 @@ static void xemaclite_rx_handler(struct net_device *dev)
skb_reserve(skb, 2);
len = xemaclite_recv_data(lp, (u8 *) skb->data);
len = xemaclite_recv_data(lp, (u8 *) skb->data, len);
if (!len) {
dev->stats.rx_errors++;
@ -640,31 +652,31 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
u32 tx_status;
/* Check if there is Rx Data available */
if ((__raw_readl(base_addr + XEL_RSR_OFFSET) &
if ((xemaclite_readl(base_addr + XEL_RSR_OFFSET) &
XEL_RSR_RECV_DONE_MASK) ||
(__raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
(xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_RSR_OFFSET)
& XEL_RSR_RECV_DONE_MASK))
xemaclite_rx_handler(dev);
/* Check if the Transmission for the first buffer is completed */
tx_status = __raw_readl(base_addr + XEL_TSR_OFFSET);
tx_status = xemaclite_readl(base_addr + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
__raw_writel(tx_status, base_addr + XEL_TSR_OFFSET);
xemaclite_writel(tx_status, base_addr + XEL_TSR_OFFSET);
tx_complete = true;
}
/* Check if the Transmission for the second buffer is completed */
tx_status = __raw_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
tx_status = xemaclite_readl(base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
if (((tx_status & XEL_TSR_XMIT_BUSY_MASK) == 0) &&
(tx_status & XEL_TSR_XMIT_ACTIVE_MASK) != 0) {
tx_status &= ~XEL_TSR_XMIT_ACTIVE_MASK;
__raw_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
xemaclite_writel(tx_status, base_addr + XEL_BUFFER_OFFSET +
XEL_TSR_OFFSET);
tx_complete = true;
@ -698,7 +710,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
/* wait for the MDIO interface to not be busy or timeout
after some time.
*/
while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
while (xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
XEL_MDIOCTRL_MDIOSTS_MASK) {
if (time_before_eq(end, jiffies)) {
WARN_ON(1);
@ -734,17 +746,17 @@ static int xemaclite_mdio_read(struct mii_bus *bus, int phy_id, int reg)
* MDIO Address register. Set the Status bit in the MDIO Control
* register to start a MDIO read transaction.
*/
ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
__raw_writel(XEL_MDIOADDR_OP_MASK |
ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
xemaclite_writel(XEL_MDIOADDR_OP_MASK |
((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
lp->base_addr + XEL_MDIOADDR_OFFSET);
__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET);
if (xemaclite_mdio_wait(lp))
return -ETIMEDOUT;
rc = __raw_readl(lp->base_addr + XEL_MDIORD_OFFSET);
rc = xemaclite_readl(lp->base_addr + XEL_MDIORD_OFFSET);
dev_dbg(&lp->ndev->dev,
"xemaclite_mdio_read(phy_id=%i, reg=%x) == %x\n",
@ -781,12 +793,12 @@ static int xemaclite_mdio_write(struct mii_bus *bus, int phy_id, int reg,
* Data register. Finally, set the Status bit in the MDIO Control
* register to start a MDIO write transaction.
*/
ctrl_reg = __raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
__raw_writel(~XEL_MDIOADDR_OP_MASK &
ctrl_reg = xemaclite_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET);
xemaclite_writel(~XEL_MDIOADDR_OP_MASK &
((phy_id << XEL_MDIOADDR_PHYADR_SHIFT) | reg),
lp->base_addr + XEL_MDIOADDR_OFFSET);
__raw_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
__raw_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
xemaclite_writel(val, lp->base_addr + XEL_MDIOWR_OFFSET);
xemaclite_writel(ctrl_reg | XEL_MDIOCTRL_MDIOSTS_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET);
return 0;
@ -834,7 +846,7 @@ static int xemaclite_mdio_setup(struct net_local *lp, struct device *dev)
/* Enable the MDIO bus by asserting the enable bit in MDIO Control
* register.
*/
__raw_writel(XEL_MDIOCTRL_MDIOEN_MASK,
xemaclite_writel(XEL_MDIOCTRL_MDIOEN_MASK,
lp->base_addr + XEL_MDIOCTRL_OFFSET);
bus = mdiobus_alloc();
@ -1126,8 +1138,8 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
}
/* Clear the Tx CSR's in case this is a restart */
__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);
__raw_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
xemaclite_writel(0, lp->base_addr + XEL_TSR_OFFSET);
xemaclite_writel(0, lp->base_addr + XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
/* Set the MAC address in the EmacLite device */
xemaclite_update_address(lp, ndev->dev_addr);

View file

@ -113,10 +113,10 @@ struct xenvif_stats {
* A subset of struct net_device_stats that contains only the
* fields that are updated in netback.c for each queue.
*/
unsigned int rx_bytes;
unsigned int rx_packets;
unsigned int tx_bytes;
unsigned int tx_packets;
u64 rx_bytes;
u64 rx_packets;
u64 tx_bytes;
u64 tx_packets;
/* Additional stats used by xenvif */
unsigned long rx_gso_checksum_fixup;
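The widening matters on 32-bit guests, where the old counters wrap after ~4.3 GB of traffic; a tiny illustration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t old_bytes = 4294967000u; /* near UINT32_MAX */
        uint64_t new_bytes = 4294967000u;

        old_bytes += 1500; /* one more MTU-sized frame: wraps to 1204 */
        new_bytes += 1500;

        printf("u32: %u  u64: %llu\n", old_bytes,
               (unsigned long long)new_bytes);
        return 0;
    }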

View file

@ -221,10 +221,10 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
unsigned long rx_bytes = 0;
unsigned long rx_packets = 0;
unsigned long tx_bytes = 0;
unsigned long tx_packets = 0;
u64 rx_bytes = 0;
u64 rx_packets = 0;
u64 tx_bytes = 0;
u64 tx_packets = 0;
unsigned int index;
spin_lock(&vif->lock);

View file

@ -433,6 +433,17 @@ static int pcie_pme_resume(struct pcie_device *srv)
return 0;
}
/**
* pcie_pme_remove - Prepare PCIe PME service device for removal.
* @srv - PCIe service device to remove.
*/
static void pcie_pme_remove(struct pcie_device *srv)
{
pcie_pme_suspend(srv);
free_irq(srv->irq, srv);
kfree(get_service_data(srv));
}
static struct pcie_port_service_driver pcie_pme_driver = {
.name = "pcie_pme",
.port_type = PCI_EXP_TYPE_ROOT_PORT,
@ -441,6 +452,7 @@ static struct pcie_port_service_driver pcie_pme_driver = {
.probe = pcie_pme_probe,
.suspend = pcie_pme_suspend,
.resume = pcie_pme_resume,
.remove = pcie_pme_remove,
};
/**

View file

@ -1583,7 +1583,7 @@ out:
int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req = NULL;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@ -1612,7 +1612,7 @@ int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *wka_port)
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
if (req && !IS_ERR(req))
if (!retval)
zfcp_dbf_rec_run_wka("fsowp_1", wka_port, req->req_id);
return retval;
}
@ -1638,7 +1638,7 @@ static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req)
int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
{
struct zfcp_qdio *qdio = wka_port->adapter->qdio;
struct zfcp_fsf_req *req = NULL;
struct zfcp_fsf_req *req;
int retval = -EIO;
spin_lock_irq(&qdio->req_q_lock);
@ -1667,7 +1667,7 @@ int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *wka_port)
zfcp_fsf_req_free(req);
out:
spin_unlock_irq(&qdio->req_q_lock);
if (req && !IS_ERR(req))
if (!retval)
zfcp_dbf_rec_run_wka("fscwp_1", wka_port, req->req_id);
return retval;
}

View file

@ -50,9 +50,13 @@ struct aac_common aac_config = {
static inline int aac_is_msix_mode(struct aac_dev *dev)
{
u32 status;
u32 status = 0;
if (dev->pdev->device == PMC_DEVICE_S6 ||
dev->pdev->device == PMC_DEVICE_S7 ||
dev->pdev->device == PMC_DEVICE_S8) {
status = src_readl(dev, MUnit.OMR);
}
return (status & AAC_INT_MODE_MSIX);
}

View file

@ -51,6 +51,7 @@
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/aer.h>
#include <linux/raid_class.h>
@ -4657,6 +4658,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
struct MPT3SAS_DEVICE *sas_device_priv_data;
u32 response_code = 0;
unsigned long flags;
unsigned int sector_sz;
mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
scmd = _scsih_scsi_lookup_get_clear(ioc, smid);
@ -4715,6 +4717,20 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
}
xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
/* In case of bogus fw or device, we could end up having
* unaligned partial completion. We can force alignment here,
* then scsi-ml does not need to handle this misbehavior.
*/
sector_sz = scmd->device->sector_size;
if (unlikely(scmd->request->cmd_type == REQ_TYPE_FS && sector_sz &&
xfer_cnt % sector_sz)) {
sdev_printk(KERN_INFO, scmd->device,
"unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
xfer_cnt, sector_sz);
xfer_cnt = round_down(xfer_cnt, sector_sz);
}
scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
@ -8746,6 +8762,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
switch (hba_mpi_version) {
case MPI2_VERSION:
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
/* Use mpt2sas driver host template for SAS 2.0 HBA's */
shost = scsi_host_alloc(&mpt2sas_driver_template,
sizeof(struct MPT3SAS_ADAPTER));
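A self-contained sketch of the rounding above; round_down() is redefined here for illustration and matches the kernel's power-of-two mask (sector sizes are powers of two):

    #include <stdio.h>

    #define round_down(x, y) ((x) & ~((y) - 1)) /* power-of-two y only */

    int main(void)
    {
        unsigned int sector_sz = 512;
        unsigned int xfer_cnt = 4600; /* 8 full sectors + 504 stray bytes */

        if (sector_sz && xfer_cnt % sector_sz)
            xfer_cnt = round_down(xfer_cnt, sector_sz);

        printf("aligned xfer_cnt = %u\n", xfer_cnt); /* prints 4096 */
        return 0;
    }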

View file

@ -3242,7 +3242,7 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
* from a probe failure context.
*/
if (!ha->rsp_q_map || !ha->rsp_q_map[0])
return;
goto free_irqs;
rsp = ha->rsp_q_map[0];
if (ha->flags.msix_enabled) {
@ -3262,6 +3262,7 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
free_irq(pci_irq_vector(ha->pdev, 0), rsp);
}
free_irqs:
pci_free_irq_vectors(ha->pdev);
}

View file

@ -1616,7 +1616,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
/* Don't abort commands in adapter during EEH
* recovery as it's not accessible/responding.
*/
if (!ha->flags.eeh_busy) {
if (GET_CMD_SP(sp) && !ha->flags.eeh_busy) {
/* Get a reference to the sp and drop the lock.
* The reference ensures this sp->done() call
* - and not the call in qla2xxx_eh_abort() -

View file

@ -1024,6 +1024,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
unsigned long buf_offset;
unsigned long current_buf_start;
unsigned long start_byte;
unsigned long prev_start_byte;
unsigned long working_bytes = total_out - buf_start;
unsigned long bytes;
char *kaddr;
@ -1071,9 +1072,16 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
if (!bio->bi_iter.bi_size)
return 0;
bvec = bio_iter_iovec(bio, bio->bi_iter);
prev_start_byte = start_byte;
start_byte = page_offset(bvec.bv_page) - disk_start;
/*
* We need to make sure we're only adjusting
* our offset into compression working buffer when
* we're switching pages. Otherwise we can incorrectly
* keep copying when we were actually done.
*/
if (start_byte != prev_start_byte) {
/*
* make sure our new page is covered by this
* working buffer
@ -1093,6 +1101,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
current_buf_start = buf_start + buf_offset;
}
}
}
return 1;
}

View file

@ -5653,6 +5653,10 @@ long btrfs_ioctl(struct file *file, unsigned int
#ifdef CONFIG_COMPAT
long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
/*
* These all access 32-bit values anyway so no further
* handling is necessary.
*/
switch (cmd) {
case FS_IOC32_GETFLAGS:
cmd = FS_IOC_GETFLAGS;
@ -5663,8 +5667,6 @@ long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FS_IOC32_GETVERSION:
cmd = FS_IOC_GETVERSION;
break;
default:
return -ENOIOCTLCMD;
}
return btrfs_ioctl(file, cmd, (unsigned long) compat_ptr(arg));

View file

@ -399,6 +399,10 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
{
spin_lock(&fiq->waitq.lock);
if (test_bit(FR_FINISHED, &req->flags)) {
spin_unlock(&fiq->waitq.lock);
return;
}
if (list_empty(&req->intr_entry)) {
list_add_tail(&req->intr_entry, &fiq->interrupts);
wake_up_locked(&fiq->waitq);
@ -1372,6 +1376,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
* code can Oops if the buffer persists after module unload.
*/
bufs[page_nr].ops = &nosteal_pipe_buf_ops;
bufs[page_nr].flags = 0;
ret = add_to_pipe(pipe, &bufs[page_nr++]);
if (unlikely(ret < 0))
break;

View file

@ -204,6 +204,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
buf->len = spd->partial[page_nr].len;
buf->private = spd->partial[page_nr].private;
buf->ops = spd->ops;
buf->flags = 0;
pipe->nrbufs++;
page_nr++;

View file

@ -21,20 +21,19 @@ struct cgroup_bpf {
*/
struct bpf_prog *prog[MAX_BPF_ATTACH_TYPE];
struct bpf_prog __rcu *effective[MAX_BPF_ATTACH_TYPE];
bool disallow_override[MAX_BPF_ATTACH_TYPE];
};
void cgroup_bpf_put(struct cgroup *cgrp);
void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent);
void __cgroup_bpf_update(struct cgroup *cgrp,
struct cgroup *parent,
struct bpf_prog *prog,
enum bpf_attach_type type);
int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
struct bpf_prog *prog, enum bpf_attach_type type,
bool overridable);
/* Wrapper for __cgroup_bpf_update() protected by cgroup_mutex */
void cgroup_bpf_update(struct cgroup *cgrp,
struct bpf_prog *prog,
enum bpf_attach_type type);
int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, bool overridable);
int __cgroup_bpf_run_filter_skb(struct sock *sk,
struct sk_buff *skb,

View file

@ -123,6 +123,12 @@ enum bpf_attach_type {
#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
/* If the BPF_F_ALLOW_OVERRIDE flag is used in the BPF_PROG_ATTACH command
* to the given target_fd cgroup, the descendant cgroup will be able to
* override the effective bpf program that was inherited from this cgroup.
*/
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
#define BPF_PSEUDO_MAP_FD 1
/* flags for BPF_MAP_UPDATE_ELEM command */
@ -178,6 +184,7 @@ union bpf_attr {
__u32 target_fd; /* container object to attach to */
__u32 attach_bpf_fd; /* eBPF program to attach */
__u32 attach_type;
__u32 attach_flags;
};
} __attribute__((aligned(8)));
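A userspace sketch of an attach that opts in to the new flag via the raw syscall; prog_fd and cgroup_fd are assumed to be obtained elsewhere, and no library wrapper is implied.

    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/bpf.h>

    static int attach_overridable(int prog_fd, int cgroup_fd)
    {
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.target_fd = cgroup_fd;
        attr.attach_bpf_fd = prog_fd;
        attr.attach_type = BPF_CGROUP_INET_EGRESS;
        attr.attach_flags = BPF_F_ALLOW_OVERRIDE;

        return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
    }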

View file

@ -9,9 +9,8 @@
#include <linux/types.h>
#include <linux/socket.h>
#ifndef __KERNEL__
#include <netinet/in.h>
#endif
#include <linux/in.h>
#include <linux/in6.h>
#define IPPROTO_L2TP 115
@ -31,7 +30,7 @@ struct sockaddr_l2tpip {
__u32 l2tp_conn_id; /* Connection ID of tunnel */
/* Pad to size of `struct sockaddr'. */
unsigned char __pad[sizeof(struct sockaddr) -
unsigned char __pad[__SOCK_SIZE__ -
sizeof(__kernel_sa_family_t) -
sizeof(__be16) - sizeof(struct in_addr) -
sizeof(__u32)];
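Worked arithmetic for the pad: __SOCK_SIZE__ is 16 in <linux/in.h>, and the explicit fields take 2 + 2 + 4 + 4 = 12 bytes, leaving 4 bytes of padding. A compile-time check:

    #include <linux/l2tp.h>

    /* sockaddr_l2tpip must keep the historical sizeof(struct sockaddr) */
    _Static_assert(sizeof(struct sockaddr_l2tpip) == 16,
                   "sockaddr_l2tpip is not sockaddr-sized");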

View file

@ -362,8 +362,8 @@ enum v4l2_quantization {
/*
* The default for R'G'B' quantization is always full range, except
* for the BT2020 colorspace. For Y'CbCr the quantization is always
* limited range, except for COLORSPACE_JPEG, SRGB, ADOBERGB,
* XV601 or XV709: those are full range.
* limited range, except for COLORSPACE_JPEG, XV601 or XV709: those
* are full range.
*/
V4L2_QUANTIZATION_DEFAULT = 0,
V4L2_QUANTIZATION_FULL_RANGE = 1,
@ -379,8 +379,7 @@ enum v4l2_quantization {
(((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
V4L2_QUANTIZATION_LIM_RANGE : \
(((is_rgb_or_hsv) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
(ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) || \
(colsp) == V4L2_COLORSPACE_ADOBERGB || (colsp) == V4L2_COLORSPACE_SRGB ? \
(ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) ? \
V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
enum v4l2_priority {

View file

@ -52,6 +52,7 @@ void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
e = rcu_dereference_protected(parent->bpf.effective[type],
lockdep_is_held(&cgroup_mutex));
rcu_assign_pointer(cgrp->bpf.effective[type], e);
cgrp->bpf.disallow_override[type] = parent->bpf.disallow_override[type];
}
}
@ -82,30 +83,63 @@ void cgroup_bpf_inherit(struct cgroup *cgrp, struct cgroup *parent)
*
* Must be called with cgroup_mutex held.
*/
void __cgroup_bpf_update(struct cgroup *cgrp,
struct cgroup *parent,
struct bpf_prog *prog,
enum bpf_attach_type type)
int __cgroup_bpf_update(struct cgroup *cgrp, struct cgroup *parent,
struct bpf_prog *prog, enum bpf_attach_type type,
bool new_overridable)
{
struct bpf_prog *old_prog, *effective;
struct bpf_prog *old_prog, *effective = NULL;
struct cgroup_subsys_state *pos;
bool overridable = true;
old_prog = xchg(cgrp->bpf.prog + type, prog);
if (parent) {
overridable = !parent->bpf.disallow_override[type];
effective = rcu_dereference_protected(parent->bpf.effective[type],
lockdep_is_held(&cgroup_mutex));
}
effective = (!prog && parent) ?
rcu_dereference_protected(parent->bpf.effective[type],
lockdep_is_held(&cgroup_mutex)) :
prog;
if (prog && effective && !overridable)
/* if parent has non-overridable prog attached, disallow
* attaching new programs to descendent cgroup
*/
return -EPERM;
if (prog && effective && overridable != new_overridable)
/* if parent has overridable prog attached, only
* allow overridable programs in descendent cgroup
*/
return -EPERM;
old_prog = cgrp->bpf.prog[type];
if (prog) {
overridable = new_overridable;
effective = prog;
if (old_prog &&
cgrp->bpf.disallow_override[type] == new_overridable)
/* disallow attaching non-overridable on top
* of existing overridable in this cgroup
* and vice versa
*/
return -EPERM;
}
if (!prog && !old_prog)
/* report error when trying to detach and nothing is attached */
return -ENOENT;
cgrp->bpf.prog[type] = prog;
css_for_each_descendant_pre(pos, &cgrp->self) {
struct cgroup *desc = container_of(pos, struct cgroup, self);
/* skip the subtree if the descendant has its own program */
if (desc->bpf.prog[type] && desc != cgrp)
if (desc->bpf.prog[type] && desc != cgrp) {
pos = css_rightmost_descendant(pos);
else
} else {
rcu_assign_pointer(desc->bpf.effective[type],
effective);
desc->bpf.disallow_override[type] = !overridable;
}
}
if (prog)
@ -115,6 +149,7 @@ void __cgroup_bpf_update(struct cgroup *cgrp,
bpf_prog_put(old_prog);
static_branch_dec(&cgroup_bpf_enabled_key);
}
return 0;
}
/**

View file

@ -935,13 +935,14 @@ static int bpf_obj_get(const union bpf_attr *attr)
#ifdef CONFIG_CGROUP_BPF
#define BPF_PROG_ATTACH_LAST_FIELD attach_type
#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
static int bpf_prog_attach(const union bpf_attr *attr)
{
enum bpf_prog_type ptype;
struct bpf_prog *prog;
struct cgroup *cgrp;
enum bpf_prog_type ptype;
int ret;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@ -949,6 +950,9 @@ static int bpf_prog_attach(const union bpf_attr *attr)
if (CHECK_ATTR(BPF_PROG_ATTACH))
return -EINVAL;
if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
return -EINVAL;
switch (attr->attach_type) {
case BPF_CGROUP_INET_INGRESS:
case BPF_CGROUP_INET_EGRESS:
@ -971,10 +975,13 @@ static int bpf_prog_attach(const union bpf_attr *attr)
return PTR_ERR(cgrp);
}
cgroup_bpf_update(cgrp, prog, attr->attach_type);
ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
if (ret)
bpf_prog_put(prog);
cgroup_put(cgrp);
return 0;
return ret;
}
#define BPF_PROG_DETACH_LAST_FIELD attach_type
@ -982,6 +989,7 @@ static int bpf_prog_attach(const union bpf_attr *attr)
static int bpf_prog_detach(const union bpf_attr *attr)
{
struct cgroup *cgrp;
int ret;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@ -997,7 +1005,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
if (IS_ERR(cgrp))
return PTR_ERR(cgrp);
cgroup_bpf_update(cgrp, NULL, attr->attach_type);
ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
cgroup_put(cgrp);
break;
@ -1005,7 +1013,7 @@ static int bpf_prog_detach(const union bpf_attr *attr)
return -EINVAL;
}
return 0;
return ret;
}
#endif /* CONFIG_CGROUP_BPF */

View file

@ -6498,15 +6498,16 @@ static __init int cgroup_namespaces_init(void)
subsys_initcall(cgroup_namespaces_init);
#ifdef CONFIG_CGROUP_BPF
void cgroup_bpf_update(struct cgroup *cgrp,
struct bpf_prog *prog,
enum bpf_attach_type type)
int cgroup_bpf_update(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, bool overridable)
{
struct cgroup *parent = cgroup_parent(cgrp);
int ret;
mutex_lock(&cgroup_mutex);
__cgroup_bpf_update(cgrp, parent, prog, type);
ret = __cgroup_bpf_update(cgrp, parent, prog, type, overridable);
mutex_unlock(&cgroup_mutex);
return ret;
}
#endif /* CONFIG_CGROUP_BPF */

View file

@ -3487,12 +3487,13 @@ struct perf_read_data {
int ret;
};
static int find_cpu_to_read(struct perf_event *event, int local_cpu)
static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
{
int event_cpu = event->oncpu;
u16 local_pkg, event_pkg;
if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
int local_cpu = smp_processor_id();
event_pkg = topology_physical_package_id(event_cpu);
local_pkg = topology_physical_package_id(local_cpu);
@ -3624,7 +3625,7 @@ u64 perf_event_read_local(struct perf_event *event)
static int perf_event_read(struct perf_event *event, bool group)
{
int ret = 0, cpu_to_read, local_cpu;
int event_cpu, ret = 0;
/*
* If event is enabled and currently active on a CPU, update the
@ -3637,21 +3638,25 @@ static int perf_event_read(struct perf_event *event, bool group)
.ret = 0,
};
local_cpu = get_cpu();
cpu_to_read = find_cpu_to_read(event, local_cpu);
put_cpu();
event_cpu = READ_ONCE(event->oncpu);
if ((unsigned)event_cpu >= nr_cpu_ids)
return 0;
preempt_disable();
event_cpu = __perf_event_read_cpu(event, event_cpu);
/*
* Purposely ignore the smp_call_function_single() return
* value.
*
* If event->oncpu isn't a valid CPU it means the event got
* If event_cpu isn't a valid CPU it means the event got
* scheduled out and that will have updated the event count.
*
* Therefore, either way, we'll have an up-to-date event count
* after this.
*/
(void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
(void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
preempt_enable();
ret = data.ret;
} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
struct perf_event_context *ctx = event->ctx;

View file

@ -18,10 +18,8 @@ void print_stack_trace(struct stack_trace *trace, int spaces)
if (WARN_ON(!trace->entries))
return;
for (i = 0; i < trace->nr_entries; i++) {
printk("%*c", 1 + spaces, ' ');
print_ip_sym(trace->entries[i]);
}
for (i = 0; i < trace->nr_entries; i++)
printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
}
EXPORT_SYMBOL_GPL(print_stack_trace);
@ -29,7 +27,6 @@ int snprint_stack_trace(char *buf, size_t size,
struct stack_trace *trace, int spaces)
{
int i;
unsigned long ip;
int generated;
int total = 0;
@ -37,9 +34,8 @@ int snprint_stack_trace(char *buf, size_t size,
return 0;
for (i = 0; i < trace->nr_entries; i++) {
ip = trace->entries[i];
generated = snprintf(buf, size, "%*c[<%p>] %pS\n",
1 + spaces, ' ', (void *) ip, (void *) ip);
generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
(void *)trace->entries[i]);
total += generated;

View file

@ -725,6 +725,11 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
*/
if (delta == 0) {
tick_nohz_restart(ts, now);
/*
* Make sure next tick stop doesn't get fooled by past
* clock deadline
*/
ts->next_tick = 0;
goto out;
}
}

View file

@ -2923,6 +2923,7 @@ static void neigh_proc_update(struct ctl_table *ctl, int write)
return;
set_bit(index, p->data_state);
if (index == NEIGH_VAR_DELAY_PROBE_TIME)
call_netevent_notifiers(NETEVENT_DELAY_PROBE_TIME_UPDATE, p);
if (!dev) /* NULL dev means this is default value */
neigh_copy_dflt_parms(net, p, index);

View file

@ -1263,7 +1263,7 @@ void __init arp_init(void)
/*
* ax25 -> ASCII conversion
*/
static char *ax2asc2(ax25_address *a, char *buf)
static void ax2asc2(ax25_address *a, char *buf)
{
char c, *s;
int n;
@ -1285,10 +1285,10 @@ static char *ax2asc2(ax25_address *a, char *buf)
*s++ = n + '0';
*s++ = '\0';
if (*buf == '\0' || *buf == '-')
return "*";
return buf;
if (*buf == '\0' || *buf == '-') {
buf[0] = '*';
buf[1] = '\0';
}
}
#endif /* CONFIG_AX25 */
@ -1322,7 +1322,7 @@ static void arp_format_neigh_entry(struct seq_file *seq,
}
#endif
sprintf(tbuf, "%pI4", n->primary_key);
seq_printf(seq, "%-16s 0x%-10x0x%-10x%s * %s\n",
seq_printf(seq, "%-16s 0x%-10x0x%-10x%-17s * %s\n",
tbuf, hatype, arp_state_to_flags(n), hbuffer, dev->name);
read_unlock(&n->lock);
}

View file

@ -117,7 +117,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
(fwmark > 0 && skb->mark == fwmark)) &&
(full || tp->snd_cwnd != tcp_probe.lastcwnd)) {
spin_lock(&tcp_probe.lock);
spin_lock_bh(&tcp_probe.lock);
/* If log fills, just silently drop */
if (tcp_probe_avail() > 1) {
struct tcp_log *p = tcp_probe.log + tcp_probe.head;
@ -157,7 +157,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
}
tcp_probe.lastcwnd = tp->snd_cwnd;
spin_unlock(&tcp_probe.lock);
spin_unlock_bh(&tcp_probe.lock);
wake_up(&tcp_probe.wait);
}
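The rule behind the _bh switch, as a kernel-context sketch (not standalone): a lock that can also be taken from softirq context must be taken with the _bh variant from process context, or a softirq arriving on the same CPU mid-critical-section deadlocks on it.

    spin_lock_bh(&tcp_probe.lock);  /* also disables local softirqs */
    /* ... append one sample to tcp_probe.log ... */
    spin_unlock_bh(&tcp_probe.lock);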

View file

@ -167,18 +167,22 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
if (np->sndflow)
fl6_flowlabel = usin->sin6_flowinfo & IPV6_FLOWINFO_MASK;
addr_type = ipv6_addr_type(&usin->sin6_addr);
if (addr_type == IPV6_ADDR_ANY) {
if (ipv6_addr_any(&usin->sin6_addr)) {
/*
* connect to self
*/
usin->sin6_addr.s6_addr[15] = 0x01;
if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
&usin->sin6_addr);
else
usin->sin6_addr = in6addr_loopback;
}
addr_type = ipv6_addr_type(&usin->sin6_addr);
daddr = &usin->sin6_addr;
if (addr_type == IPV6_ADDR_MAPPED) {
if (addr_type & IPV6_ADDR_MAPPED) {
struct sockaddr_in sin;
if (__ipv6_only_sock(sk)) {

View file

@ -1022,6 +1022,9 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk,
}
}
#endif
if (ipv6_addr_v4mapped(&fl6->saddr) &&
!(ipv6_addr_v4mapped(&fl6->daddr) || ipv6_addr_any(&fl6->daddr)))
return -EAFNOSUPPORT;
return 0;

View file

@ -149,8 +149,13 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
* connect() to INADDR_ANY means loopback (BSD'ism).
*/
if (ipv6_addr_any(&usin->sin6_addr))
usin->sin6_addr.s6_addr[15] = 0x1;
if (ipv6_addr_any(&usin->sin6_addr)) {
if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
&usin->sin6_addr);
else
usin->sin6_addr = in6addr_loopback;
}
addr_type = ipv6_addr_type(&usin->sin6_addr);
@ -189,7 +194,7 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
* TCP over IPv4
*/
if (addr_type == IPV6_ADDR_MAPPED) {
if (addr_type & IPV6_ADDR_MAPPED) {
u32 exthdrlen = icsk->icsk_ext_hdr_len;
struct sockaddr_in sin;

View file

@ -1046,6 +1046,10 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
daddr = &sin6->sin6_addr;
if (ipv6_addr_any(daddr) &&
ipv6_addr_v4mapped(&np->saddr))
ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
daddr);
break;
case AF_INET:
goto do_udp_sendmsg;

View file

@ -1044,9 +1044,11 @@ wait_for_memory:
} else {
/* Message not complete, save state */
partial_message:
if (head) {
kcm->seq_skb = head;
kcm_tx_msg(head)->last_skb = skb;
}
}
KCM_STATS_ADD(kcm->stats.tx_bytes, copied);

View file

@ -821,7 +821,10 @@ void llc_conn_handler(struct llc_sap *sap, struct sk_buff *skb)
* another trick required to cope with how the PROCOM state
* machine works. -acme
*/
skb_orphan(skb);
sock_hold(sk);
skb->sk = sk;
skb->destructor = sock_efree;
}
if (!sock_owned_by_user(sk))
llc_conn_rcv(sk, skb);

View file

@ -290,7 +290,10 @@ static void llc_sap_rcv(struct llc_sap *sap, struct sk_buff *skb,
ev->type = LLC_SAP_EV_TYPE_PDU;
ev->reason = 0;
skb_orphan(skb);
sock_hold(sk);
skb->sk = sk;
skb->destructor = sock_efree;
llc_sap_state_process(sap, skb);
}

View file

@ -1627,6 +1627,7 @@ static void fanout_release_data(struct packet_fanout *f)
static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
{
struct packet_rollover *rollover = NULL;
struct packet_sock *po = pkt_sk(sk);
struct packet_fanout *f, *match;
u8 type = type_flags & 0xff;
@ -1649,23 +1650,28 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
return -EINVAL;
}
if (!po->running)
return -EINVAL;
mutex_lock(&fanout_mutex);
err = -EINVAL;
if (!po->running)
goto out;
err = -EALREADY;
if (po->fanout)
return -EALREADY;
goto out;
if (type == PACKET_FANOUT_ROLLOVER ||
(type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
po->rollover = kzalloc(sizeof(*po->rollover), GFP_KERNEL);
if (!po->rollover)
return -ENOMEM;
atomic_long_set(&po->rollover->num, 0);
atomic_long_set(&po->rollover->num_huge, 0);
atomic_long_set(&po->rollover->num_failed, 0);
err = -ENOMEM;
rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
if (!rollover)
goto out;
atomic_long_set(&rollover->num, 0);
atomic_long_set(&rollover->num_huge, 0);
atomic_long_set(&rollover->num_failed, 0);
po->rollover = rollover;
}
mutex_lock(&fanout_mutex);
match = NULL;
list_for_each_entry(f, &fanout_list, list) {
if (f->id == id &&
@ -1712,11 +1718,11 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
}
}
out:
mutex_unlock(&fanout_mutex);
if (err) {
kfree(po->rollover);
if (err && rollover) {
kfree(rollover);
po->rollover = NULL;
}
mutex_unlock(&fanout_mutex);
return err;
}
@ -1725,11 +1731,9 @@ static void fanout_release(struct sock *sk)
struct packet_sock *po = pkt_sk(sk);
struct packet_fanout *f;
f = po->fanout;
if (!f)
return;
mutex_lock(&fanout_mutex);
f = po->fanout;
if (f) {
po->fanout = NULL;
if (atomic_dec_and_test(&f->sk_ref)) {
@ -1738,10 +1742,11 @@ static void fanout_release(struct sock *sk)
fanout_release_data(f);
kfree(f);
}
mutex_unlock(&fanout_mutex);
if (po->rollover)
kfree_rcu(po->rollover, rcu);
}
mutex_unlock(&fanout_mutex);
}
static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
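The fanout rework is a classic check-then-act fix: previously po->running and po->fanout were tested before fanout_mutex was taken, and the rollover area was published to po->rollover before the lock, so two racing fanout_add() calls could both pass the checks, and an error path could free memory another thread was still using. After the change every test and every state update sits inside one critical section, and the rollover is built in a local variable that is only published on success. A commented reduction of the discipline (names as in the hunk):

mutex_lock(&fanout_mutex);
err = -EINVAL;
if (!po->running)	/* checked under the lock: cannot change under us */
	goto out;
err = -EALREADY;
if (po->fanout)		/* likewise: no second fanout can sneak in */
	goto out;
/* ... build state in locals, assign to po-> members only on success ... */
out:
mutex_unlock(&fanout_mutex);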

View file

@ -104,7 +104,7 @@ static int attach_filter(int cg_fd, int type, int verdict)
return EXIT_FAILURE;
}
ret = bpf_prog_attach(prog_fd, cg_fd, type);
ret = bpf_prog_attach(prog_fd, cg_fd, type, 0);
if (ret < 0) {
printf("Failed to attach prog to cgroup: '%s'\n",
strerror(errno));

View file

@ -79,11 +79,12 @@ int main(int argc, char **argv)
if (join_cgroup(FOO))
goto err;
if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS)) {
if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) {
log_err("Attaching prog to /foo");
goto err;
}
printf("Attached DROP prog. This ping in cgroup /foo should fail...\n");
assert(system(PING_CMD) != 0);
/* Create cgroup /foo/bar, get fd, and join it */
@ -94,24 +95,27 @@ int main(int argc, char **argv)
if (join_cgroup(BAR))
goto err;
printf("Attached DROP prog. This ping in cgroup /foo/bar should fail...\n");
assert(system(PING_CMD) != 0);
if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS)) {
if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
log_err("Attaching prog to /foo/bar");
goto err;
}
printf("Attached PASS prog. This ping in cgroup /foo/bar should pass...\n");
assert(system(PING_CMD) == 0);
if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) {
log_err("Detaching program from /foo/bar");
goto err;
}
printf("Detached PASS from /foo/bar while DROP is attached to /foo.\n"
"This ping in cgroup /foo/bar should fail...\n");
assert(system(PING_CMD) != 0);
if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS)) {
if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
log_err("Attaching prog to /foo/bar");
goto err;
}
@ -121,8 +125,60 @@ int main(int argc, char **argv)
goto err;
}
printf("Attached PASS from /foo/bar and detached DROP from /foo.\n"
"This ping in cgroup /foo/bar should pass...\n");
assert(system(PING_CMD) == 0);
if (bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
log_err("Attaching prog to /foo/bar");
goto err;
}
if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) {
errno = 0;
log_err("Unexpected success attaching prog to /foo/bar");
goto err;
}
if (bpf_prog_detach(bar, BPF_CGROUP_INET_EGRESS)) {
log_err("Detaching program from /foo/bar");
goto err;
}
if (!bpf_prog_detach(foo, BPF_CGROUP_INET_EGRESS)) {
errno = 0;
log_err("Unexpected success in double detach from /foo");
goto err;
}
if (bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) {
log_err("Attaching non-overridable prog to /foo");
goto err;
}
if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 0)) {
errno = 0;
log_err("Unexpected success attaching non-overridable prog to /foo/bar");
goto err;
}
if (!bpf_prog_attach(allow_prog, bar, BPF_CGROUP_INET_EGRESS, 1)) {
errno = 0;
log_err("Unexpected success attaching overridable prog to /foo/bar");
goto err;
}
if (!bpf_prog_attach(allow_prog, foo, BPF_CGROUP_INET_EGRESS, 1)) {
errno = 0;
log_err("Unexpected success attaching overridable prog to /foo");
goto err;
}
if (bpf_prog_attach(drop_prog, foo, BPF_CGROUP_INET_EGRESS, 0)) {
log_err("Attaching different non-overridable prog to /foo");
goto err;
}
goto out;
err:
@ -132,5 +188,9 @@ out:
close(foo);
close(bar);
cleanup_cgroup_environment();
if (!rc)
printf("PASS\n");
else
printf("FAIL\n");
return rc;
}

View file

@ -75,7 +75,7 @@ int main(int argc, char **argv)
return EXIT_FAILURE;
}
ret = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_SOCK_CREATE);
ret = bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_SOCK_CREATE, 0);
if (ret < 0) {
printf("Failed to attach prog to cgroup: '%s'\n",
strerror(errno));

View file

@ -55,7 +55,7 @@ int main(int argc, char **argv)
}
ret = bpf_prog_attach(prog_fd[filter_id], cg_fd,
BPF_CGROUP_INET_SOCK_CREATE);
BPF_CGROUP_INET_SOCK_CREATE, 0);
if (ret < 0) {
printf("Failed to attach prog to cgroup: '%s'\n",
strerror(errno));

View file

@ -123,6 +123,12 @@ enum bpf_attach_type {
#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
/* If the BPF_F_ALLOW_OVERRIDE flag is used in the BPF_PROG_ATTACH command
 * for the given target_fd cgroup, descendant cgroups will be able to
 * override the effective bpf program that was inherited from this cgroup.
 */
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
#define BPF_PSEUDO_MAP_FD 1
/* flags for BPF_MAP_UPDATE_ELEM command */
@ -178,6 +184,7 @@ union bpf_attr {
__u32 target_fd; /* container object to attach to */
__u32 attach_bpf_fd; /* eBPF program to attach */
__u32 attach_type;
__u32 attach_flags;
};
} __attribute__((aligned(8)));
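The flag's semantics in the cgroup hierarchy, sketched with hypothetical paths: an attach with BPF_F_ALLOW_OVERRIDE may itself be overridden by a descendant cgroup, while an attach with flags == 0 pins the program for the whole subtree and later attach attempts below it are rejected (the test program above exercises exactly these combinations):

/*
 *   /foo       prog A, BPF_F_ALLOW_OVERRIDE
 *   /foo/bar   prog B, BPF_F_ALLOW_OVERRIDE   -> B overrides A for bar
 *
 *   /foo       prog A, flags == 0
 *   /foo/bar   any attach attempt             -> rejected
 */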

View file

@ -168,7 +168,8 @@ int bpf_obj_get(const char *pathname)
return sys_bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type,
unsigned int flags)
{
union bpf_attr attr;
@ -176,6 +177,7 @@ int bpf_prog_attach(int prog_fd, int target_fd, enum bpf_attach_type type)
attr.target_fd = target_fd;
attr.attach_bpf_fd = prog_fd;
attr.attach_type = type;
attr.attach_flags = flags;
return sys_bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}
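A hedged usage sketch of the extended helper (obtaining prog_fd and the cgroup fd is elided; BPF_F_ALLOW_OVERRIDE is the uapi flag added above):

#include <linux/bpf.h>
#include "bpf.h" /* the tools/lib/bpf declarations changed below */

int attach_overridable(int prog_fd, int cg_fd)
{
	/* descendants of cg_fd may replace this program */
	return bpf_prog_attach(prog_fd, cg_fd,
			       BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_OVERRIDE);
}

int attach_exclusive(int prog_fd, int cg_fd)
{
	/* flags == 0: later attaches in the subtree are rejected */
	return bpf_prog_attach(prog_fd, cg_fd, BPF_CGROUP_INET_EGRESS, 0);
}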

View file

@ -41,7 +41,8 @@ int bpf_map_delete_elem(int fd, const void *key);
int bpf_map_get_next_key(int fd, const void *key, void *next_key);
int bpf_obj_pin(int fd, const char *pathname);
int bpf_obj_get(const char *pathname);
int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type);
int bpf_prog_attach(int prog_fd, int attachable_fd, enum bpf_attach_type type,
unsigned int flags);
int bpf_prog_detach(int attachable_fd, enum bpf_attach_type type);

View file

@ -1199,7 +1199,7 @@ static int ui_init(void)
BUG_ON(1);
}
perf_hpp__register_sort_field(fmt);
perf_hpp__prepend_sort_field(fmt);
return 0;
}

View file

@ -521,6 +521,12 @@ void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
list_add_tail(&format->sort_list, &list->sorts);
}
void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
struct perf_hpp_fmt *format)
{
list_add(&format->sort_list, &list->sorts);
}
void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
list_del(&format->list);
@ -560,6 +566,10 @@ void perf_hpp__setup_output_field(struct perf_hpp_list *list)
perf_hpp_list__for_each_sort_list(list, fmt) {
struct perf_hpp_fmt *pos;
/* skip sort-only fields ("sort_compute" in perf diff) */
if (!fmt->entry && !fmt->color)
continue;
perf_hpp_list__for_each_format(list, pos) {
if (fmt_equal(fmt, pos))
goto next;
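ui_init() now prepends the overhead format instead of appending it, so the output field is compared before any user-specified sort keys. The difference is simply which end of the sort list the entry lands on; a minimal sketch using the kernel list API:

#include <linux/list.h>

static LIST_HEAD(sort_keys);

/* appended keys are evaluated last (lowest precedence) */
static void register_sort_key(struct list_head *entry)
{
	list_add_tail(entry, &sort_keys);
}

/* prepended keys are evaluated first (highest precedence) */
static void prepend_sort_key(struct list_head *entry)
{
	list_add(entry, &sort_keys);
}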

View file

@ -437,7 +437,7 @@ fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
}
call->ip = cursor_node->ip;
call->ms.sym = cursor_node->sym;
call->ms.map = cursor_node->map;
call->ms.map = map__get(cursor_node->map);
if (cursor_node->branch) {
call->branch_count = 1;
@ -477,6 +477,7 @@ add_child(struct callchain_node *parent,
list_for_each_entry_safe(call, tmp, &new->val, list) {
list_del(&call->list);
map__zput(call->ms.map);
free(call);
}
free(new);
@ -761,6 +762,7 @@ merge_chain_branch(struct callchain_cursor *cursor,
list->ms.map, list->ms.sym,
false, NULL, 0, 0);
list_del(&list->list);
map__zput(list->ms.map);
free(list);
}
@ -811,7 +813,8 @@ int callchain_cursor_append(struct callchain_cursor *cursor,
}
node->ip = ip;
node->map = map;
map__zput(node->map);
node->map = map__get(map);
node->sym = sym;
node->branch = branch;
node->nr_loop_iter = nr_loop_iter;
@ -1142,11 +1145,13 @@ static void free_callchain_node(struct callchain_node *node)
list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
list_del(&list->list);
map__zput(list->ms.map);
free(list);
}
list_for_each_entry_safe(list, tmp, &node->val, list) {
list_del(&list->list);
map__zput(list->ms.map);
free(list);
}
@ -1210,6 +1215,7 @@ int callchain_node__make_parent_list(struct callchain_node *node)
goto out;
*new = *chain;
new->has_children = false;
map__get(new->ms.map);
list_add_tail(&new->list, &head);
}
parent = parent->parent;
@ -1230,6 +1236,7 @@ int callchain_node__make_parent_list(struct callchain_node *node)
out:
list_for_each_entry_safe(chain, new, &head, list) {
list_del(&chain->list);
map__zput(chain->ms.map);
free(chain);
}
return -ENOMEM;
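The callchain fixes all follow one ownership rule: any structure that stores a struct map pointer must hold its own reference, taken with map__get() (NULL-safe, returns its argument) and dropped with map__zput() (puts the reference and clears the pointer). A hedged sketch with a hypothetical holder type:

/* hypothetical holder type for illustration only */
struct holder {
	struct map *map;
};

static void holder_set_map(struct holder *h, struct map *m)
{
	map__zput(h->map);    /* release any previous reference, NULL the field */
	h->map = map__get(m); /* take our own reference on the new map */
}

static void holder_destroy(struct holder *h)
{
	map__zput(h->map);
}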

View file

@ -5,6 +5,7 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include "event.h"
#include "map.h"
#include "symbol.h"
#define HELP_PAD "\t\t\t\t"
@ -184,8 +185,13 @@ int callchain_merge(struct callchain_cursor *cursor,
*/
static inline void callchain_cursor_reset(struct callchain_cursor *cursor)
{
struct callchain_cursor_node *node;
cursor->nr = 0;
cursor->last = &cursor->first;
for (node = cursor->first; node != NULL; node = node->next)
map__zput(node->map);
}
int callchain_cursor_append(struct callchain_cursor *cursor, u64 ip,

View file

@ -1,6 +1,7 @@
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "map.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
@ -1019,6 +1020,10 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
int max_stack_depth, void *arg)
{
int err, err2;
struct map *alm = NULL;
if (al && al->map)
alm = map__get(al->map);
err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
iter->evsel, al, max_stack_depth);
@ -1058,6 +1063,8 @@ out:
if (!err)
err = err2;
map__put(alm);
return err;
}
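Because callchain resolution can now drop the last reference on al->map (via the callchain_cursor_reset() change above), hist_entry_iter__add() pins the map for the duration of the call. The pin-across-call pattern in isolation, with a hypothetical callee:

static int resolve_with_pinned_map(struct addr_location *al)
{
	struct map *alm = NULL;
	int err;

	if (al && al->map)
		alm = map__get(al->map); /* keep al->map alive across the call */

	err = do_resolve(al);            /* hypothetical: may put map references */

	map__put(alm);                   /* NULL-safe release */
	return err;
}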

View file

@ -283,6 +283,8 @@ void perf_hpp_list__column_register(struct perf_hpp_list *list,
struct perf_hpp_fmt *format);
void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
struct perf_hpp_fmt *format);
void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
struct perf_hpp_fmt *format);
static inline void perf_hpp__column_register(struct perf_hpp_fmt *format)
{
@ -294,6 +296,11 @@ static inline void perf_hpp__register_sort_field(struct perf_hpp_fmt *format)
perf_hpp_list__register_sort_field(&perf_hpp_list, format);
}
static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
{
perf_hpp_list__prepend_sort_field(&perf_hpp_list, format);
}
#define perf_hpp_list__for_each_format(_list, format) \
list_for_each_entry(format, &(_list)->fields, list)