Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Commit 76eb75be79
@@ -12,7 +12,7 @@ DOCBOOKS := z8530book.xml \
 	    kernel-api.xml filesystems.xml lsm.xml kgdb.xml \
 	    gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
 	    genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
-	    80211.xml sh.xml regulator.xml w1.xml \
+	    sh.xml regulator.xml w1.xml \
 	    writing_musb_glue_layer.xml iio.xml

 ifeq ($(DOCBOOKS),)
@@ -54,9 +54,9 @@ This is the hardware sector size of the device, in bytes.

 io_poll (RW)
 ------------
-When read, this file shows the total number of block IO polls and how
-many returned success. Writing '0' to this file will disable polling
-for this device. Writing any non-zero value will enable this feature.
+When read, this file shows whether polling is enabled (1) or disabled
+(0). Writing '0' to this file will disable polling for this device.
+Writing any non-zero value will enable this feature.

 io_poll_delay (RW)
 ------------------
@@ -5,8 +5,8 @@ platform_labels - INTEGER
	possible to configure forwarding for label values equal to or
	greater than the number of platform labels.

-	A dense utliziation of the entries in the platform label table
-	is possible and expected aas the platform labels are locally
+	A dense utilization of the entries in the platform label table
+	is possible and expected as the platform labels are locally
	allocated.

	If the number of platform label table entries is set to 0 no
@@ -151,7 +151,7 @@ bool ether_addr_equal(const u8 *addr1, const u8 *addr2)
 #else
	const u16 *a = (const u16 *)addr1;
	const u16 *b = (const u16 *)addr2;
-	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0;
+	return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
 #endif
 }

MAINTAINERS
@@ -5086,9 +5086,11 @@ F:	drivers/net/wan/dlci.c
 F:	drivers/net/wan/sdla.c

 FRAMEBUFFER LAYER
+M:	Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
 L:	linux-fbdev@vger.kernel.org
+T:	git git://github.com/bzolnier/linux.git
 Q:	http://patchwork.kernel.org/project/linux-fbdev/list/
-S:	Orphan
+S:	Maintained
 F:	Documentation/fb/
 F:	drivers/video/
 F:	include/video/
@@ -8858,17 +8860,22 @@ F:	drivers/video/fbdev/nvidia/
 NVM EXPRESS DRIVER
 M:	Keith Busch <keith.busch@intel.com>
 M:	Jens Axboe <axboe@fb.com>
+M:	Christoph Hellwig <hch@lst.de>
+M:	Sagi Grimberg <sagi@grimberg.me>
 L:	linux-nvme@lists.infradead.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
-W:	https://kernel.googlesource.com/pub/scm/linux/kernel/git/axboe/linux-block/
+T:	git://git.infradead.org/nvme.git
+W:	http://git.infradead.org/nvme.git
 S:	Supported
 F:	drivers/nvme/host/
 F:	include/linux/nvme.h
 F:	include/uapi/linux/nvme_ioctl.h

 NVM EXPRESS TARGET DRIVER
 M:	Christoph Hellwig <hch@lst.de>
+M:	Sagi Grimberg <sagi@grimberg.me>
 L:	linux-nvme@lists.infradead.org
+T:	git://git.infradead.org/nvme.git
+W:	http://git.infradead.org/nvme.git
 S:	Supported
 F:	drivers/nvme/target/

@@ -13533,11 +13540,11 @@ F:	arch/x86/xen/*swiotlb*
 F:	drivers/xen/*swiotlb*

 XFS FILESYSTEM
-M:	Dave Chinner <david@fromorbit.com>
+M:	Darrick J. Wong <darrick.wong@oracle.com>
 M:	linux-xfs@vger.kernel.org
 L:	linux-xfs@vger.kernel.org
 W:	http://xfs.org/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/dgc/linux-xfs.git
+T:	git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
 S:	Supported
 F:	Documentation/filesystems/xfs.txt
 F:	fs/xfs/
Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 10
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Roaring Lionus

 # *DOCUMENTATION*
@@ -44,6 +44,8 @@ SECTIONS
	/* Read-only sections, merged into text segment: */
	. = LOAD_BASE ;

+	_text = .;
+
	/* _s_kernel_ro must be page aligned */
	. = ALIGN(PAGE_SIZE);
	_s_kernel_ro = .;
@@ -49,7 +49,6 @@ struct thread_info {
 #define TIF_POLLING_NRFLAG	3	/* true if poll_idle() is polling TIF_NEED_RESCHED */
 #define TIF_32BIT		4	/* 32 bit binary */
 #define TIF_MEMDIE		5	/* is terminating due to OOM killer */
-#define TIF_RESTORE_SIGMASK	6	/* restore saved signal mask */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_NOTIFY_RESUME	8	/* callback before returning to user */
 #define TIF_SINGLESTEP		9	/* single stepping? */
@@ -235,9 +235,26 @@ void __init time_init(void)

	cr16_hz = 100 * PAGE0->mem_10msec; /* Hz */

-	/* register at clocksource framework */
-	clocksource_register_hz(&clocksource_cr16, cr16_hz);
-
	/* register as sched_clock source */
	sched_clock_register(read_cr16_sched_clock, BITS_PER_LONG, cr16_hz);
 }
+
+static int __init init_cr16_clocksource(void)
+{
+	/*
+	 * The cr16 interval timers are not syncronized across CPUs, so mark
+	 * them unstable and lower rating on SMP systems.
+	 */
+	if (num_online_cpus() > 1) {
+		clocksource_cr16.flags = CLOCK_SOURCE_UNSTABLE;
+		clocksource_cr16.rating = 0;
+	}
+
+	/* register at clocksource framework */
+	clocksource_register_hz(&clocksource_cr16,
+		100 * PAGE0->mem_10msec);
+
+	return 0;
+}
+
+device_initcall(init_cr16_clocksource);
@@ -234,7 +234,7 @@ show_signal_msg(struct pt_regs *regs, unsigned long code,
		tsk->comm, code, address);
	print_vma_addr(KERN_CONT " in ", regs->iaoq[0]);

-	pr_cont(" trap #%lu: %s%c", code, trap_name(code),
+	pr_cont("\ntrap #%lu: %s%c", code, trap_name(code),
		vma ? ',':'\n');

	if (vma)
@@ -0,0 +1,8 @@
+#ifndef _ASM_S390_PROTOTYPES_H
+
+#include <linux/kvm_host.h>
+#include <linux/ftrace.h>
+#include <asm/fpu/api.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_S390_PROTOTYPES_H */
@@ -94,7 +94,7 @@ static void update_mt_scaling(void)
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 */
-static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
+static int do_account_vtime(struct task_struct *tsk)
 {
	u64 timer, clock, user, system, steal;
	u64 user_scaled, system_scaled;
@@ -138,7 +138,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
	}
	account_user_time(tsk, user);
	tsk->utimescaled += user_scaled;
-	account_system_time(tsk, hardirq_offset, system);
+	account_system_time(tsk, 0, system);
	tsk->stimescaled += system_scaled;

	steal = S390_lowcore.steal_timer;
@@ -152,7 +152,7 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)

 void vtime_task_switch(struct task_struct *prev)
 {
-	do_account_vtime(prev, 0);
+	do_account_vtime(prev);
	prev->thread.user_timer = S390_lowcore.user_timer;
	prev->thread.system_timer = S390_lowcore.system_timer;
	S390_lowcore.user_timer = current->thread.user_timer;
@@ -166,7 +166,7 @@ void vtime_task_switch(struct task_struct *prev)
 */
 void vtime_account_user(struct task_struct *tsk)
 {
-	if (do_account_vtime(tsk, HARDIRQ_OFFSET))
+	if (do_account_vtime(tsk))
		virt_timer_expire();
 }

@@ -139,6 +139,19 @@ static __always_inline void __clear_bit(long nr, volatile unsigned long *addr)
	asm volatile("btr %1,%0" : ADDR : "Ir" (nr));
 }

+static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+{
+	bool negative;
+	asm volatile(LOCK_PREFIX "andb %2,%1\n\t"
+		CC_SET(s)
+		: CC_OUT(s) (negative), ADDR
+		: "ir" ((char) ~(1 << nr)) : "memory");
+	return negative;
+}
+
+// Let everybody know we have it
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+
 /*
  * __clear_bit_unlock - Clears a bit in memory
  * @nr: Bit to clear
@@ -544,6 +544,8 @@ static inline bool may_queue(struct rq_wb *rwb, struct rq_wait *rqw,
 * the timer to kick off queuing again.
 */
 static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
+	__releases(lock)
+	__acquires(lock)
 {
	struct rq_wait *rqw = get_rq_wait(rwb, current_is_kswapd());
	DEFINE_WAIT(wait);
@@ -558,13 +560,12 @@ static void __wbt_wait(struct rq_wb *rwb, unsigned long rw, spinlock_t *lock)
		if (may_queue(rwb, rqw, &wait, rw))
			break;

-		if (lock)
+		if (lock) {
			spin_unlock_irq(lock);
-
-		io_schedule();
-
-		if (lock)
+			io_schedule();
			spin_lock_irq(lock);
+		} else
+			io_schedule();
	} while (1);

	finish_wait(&rqw->wait, &wait);
@@ -595,7 +596,7 @@ static inline bool wbt_should_throttle(struct rq_wb *rwb, struct bio *bio)
 * in an irq held spinlock, if it holds one when calling this function.
 * If we do sleep, we'll release and re-grab it.
 */
-unsigned int wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
+enum wbt_flags wbt_wait(struct rq_wb *rwb, struct bio *bio, spinlock_t *lock)
 {
	unsigned int ret = 0;

@@ -1461,16 +1461,25 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
	for (i = 0; i < ctcount; i++) {
		unsigned int dlen = COMP_BUF_SIZE;
		int ilen = ctemplate[i].inlen;
+		void *input_vec;
+
+		input_vec = kmalloc(ilen, GFP_KERNEL);
+		if (!input_vec) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		memcpy(input_vec, ctemplate[i].input, ilen);
		memset(output, 0, dlen);
		init_completion(&result.completion);
-		sg_init_one(&src, ctemplate[i].input, ilen);
+		sg_init_one(&src, input_vec, ilen);
		sg_init_one(&dst, output, dlen);

		req = acomp_request_alloc(tfm);
		if (!req) {
			pr_err("alg: acomp: request alloc failed for %s\n",
			       algo);
+			kfree(input_vec);
			ret = -ENOMEM;
			goto out;
		}
@@ -1483,6 +1492,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
		if (ret) {
			pr_err("alg: acomp: compression failed on test %d for %s: ret=%d\n",
			       i + 1, algo, -ret);
+			kfree(input_vec);
			acomp_request_free(req);
			goto out;
		}
@@ -1491,6 +1501,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
			pr_err("alg: acomp: Compression test %d failed for %s: output len = %d\n",
			       i + 1, algo, req->dlen);
			ret = -EINVAL;
+			kfree(input_vec);
			acomp_request_free(req);
			goto out;
		}
@@ -1500,26 +1511,37 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
			       i + 1, algo);
			hexdump(output, req->dlen);
			ret = -EINVAL;
+			kfree(input_vec);
			acomp_request_free(req);
			goto out;
		}

+		kfree(input_vec);
		acomp_request_free(req);
	}

	for (i = 0; i < dtcount; i++) {
		unsigned int dlen = COMP_BUF_SIZE;
		int ilen = dtemplate[i].inlen;
+		void *input_vec;
+
+		input_vec = kmalloc(ilen, GFP_KERNEL);
+		if (!input_vec) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		memcpy(input_vec, dtemplate[i].input, ilen);
		memset(output, 0, dlen);
		init_completion(&result.completion);
-		sg_init_one(&src, dtemplate[i].input, ilen);
+		sg_init_one(&src, input_vec, ilen);
		sg_init_one(&dst, output, dlen);

		req = acomp_request_alloc(tfm);
		if (!req) {
			pr_err("alg: acomp: request alloc failed for %s\n",
			       algo);
+			kfree(input_vec);
			ret = -ENOMEM;
			goto out;
		}
@@ -1532,6 +1554,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
		if (ret) {
			pr_err("alg: acomp: decompression failed on test %d for %s: ret=%d\n",
			       i + 1, algo, -ret);
+			kfree(input_vec);
			acomp_request_free(req);
			goto out;
		}
@@ -1540,6 +1563,7 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
			pr_err("alg: acomp: Decompression test %d failed for %s: output len = %d\n",
			       i + 1, algo, req->dlen);
			ret = -EINVAL;
+			kfree(input_vec);
			acomp_request_free(req);
			goto out;
		}
@@ -1549,10 +1573,12 @@ static int test_acomp(struct crypto_acomp *tfm, struct comp_testvec *ctemplate,
			       i + 1, algo);
			hexdump(output, req->dlen);
			ret = -EINVAL;
+			kfree(input_vec);
			acomp_request_free(req);
			goto out;
		}

+		kfree(input_vec);
		acomp_request_free(req);
	}

@@ -273,7 +273,8 @@ struct mv_cesa_op_ctx {
 #define CESA_TDMA_SRC_IN_SRAM			BIT(30)
 #define CESA_TDMA_END_OF_REQ			BIT(29)
 #define CESA_TDMA_BREAK_CHAIN			BIT(28)
-#define CESA_TDMA_TYPE_MSK			GENMASK(27, 0)
+#define CESA_TDMA_SET_STATE			BIT(27)
+#define CESA_TDMA_TYPE_MSK			GENMASK(26, 0)
 #define CESA_TDMA_DUMMY				0
 #define CESA_TDMA_DATA				1
 #define CESA_TDMA_OP				2
@@ -280,13 +280,32 @@ static void mv_cesa_ahash_std_prepare(struct ahash_request *req)
	sreq->offset = 0;
 }

+static void mv_cesa_ahash_dma_step(struct ahash_request *req)
+{
+	struct mv_cesa_ahash_req *creq = ahash_request_ctx(req);
+	struct mv_cesa_req *base = &creq->base;
+
+	/* We must explicitly set the digest state. */
+	if (base->chain.first->flags & CESA_TDMA_SET_STATE) {
+		struct mv_cesa_engine *engine = base->engine;
+		int i;
+
+		/* Set the hash state in the IVDIG regs. */
+		for (i = 0; i < ARRAY_SIZE(creq->state); i++)
+			writel_relaxed(creq->state[i], engine->regs +
+				       CESA_IVDIG(i));
+	}
+
+	mv_cesa_dma_step(base);
+}
+
 static void mv_cesa_ahash_step(struct crypto_async_request *req)
 {
	struct ahash_request *ahashreq = ahash_request_cast(req);
	struct mv_cesa_ahash_req *creq = ahash_request_ctx(ahashreq);

	if (mv_cesa_req_get_type(&creq->base) == CESA_DMA_REQ)
-		mv_cesa_dma_step(&creq->base);
+		mv_cesa_ahash_dma_step(ahashreq);
	else
		mv_cesa_ahash_std_step(ahashreq);
 }
@@ -584,12 +603,16 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
	struct mv_cesa_ahash_dma_iter iter;
	struct mv_cesa_op_ctx *op = NULL;
	unsigned int frag_len;
+	bool set_state = false;
	int ret;
	u32 type;

	basereq->chain.first = NULL;
	basereq->chain.last = NULL;

+	if (!mv_cesa_mac_op_is_first_frag(&creq->op_tmpl))
+		set_state = true;
+
	if (creq->src_nents) {
		ret = dma_map_sg(cesa_dev->dev, req->src, creq->src_nents,
				 DMA_TO_DEVICE);
@@ -683,6 +706,15 @@ static int mv_cesa_ahash_dma_req_init(struct ahash_request *req)
	if (type != CESA_TDMA_RESULT)
		basereq->chain.last->flags |= CESA_TDMA_BREAK_CHAIN;

+	if (set_state) {
+		/*
+		 * Put the CESA_TDMA_SET_STATE flag on the first tdma desc to
+		 * let the step logic know that the IVDIG registers should be
+		 * explicitly set before launching a TDMA chain.
+		 */
+		basereq->chain.first->flags |= CESA_TDMA_SET_STATE;
+	}
+
	return 0;

 err_free_tdma:
@@ -109,7 +109,14 @@ void mv_cesa_tdma_chain(struct mv_cesa_engine *engine,
		last->next = dreq->chain.first;
		engine->chain.last = dreq->chain.last;

-		if (!(last->flags & CESA_TDMA_BREAK_CHAIN))
+		/*
+		 * Break the DMA chain if the CESA_TDMA_BREAK_CHAIN is set on
+		 * the last element of the current chain, or if the request
+		 * being queued needs the IV regs to be set before lauching
+		 * the request.
+		 */
+		if (!(last->flags & CESA_TDMA_BREAK_CHAIN) &&
+		    !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
			last->next_dma = dreq->chain.first->cur_dma;
	}
 }
@@ -64,7 +64,8 @@ MODULE_DESCRIPTION("Asus HID Keyboard and TouchPad");
 #define QUIRK_SKIP_INPUT_MAPPING	BIT(2)
 #define QUIRK_IS_MULTITOUCH		BIT(3)

-#define NOTEBOOK_QUIRKS			QUIRK_FIX_NOTEBOOK_REPORT
+#define KEYBOARD_QUIRKS			(QUIRK_FIX_NOTEBOOK_REPORT | \
+						 QUIRK_NO_INIT_REPORTS)
 #define TOUCHPAD_QUIRKS			(QUIRK_NO_INIT_REPORTS | \
						 QUIRK_SKIP_INPUT_MAPPING | \
						 QUIRK_IS_MULTITOUCH)
@@ -170,11 +171,11 @@ static int asus_raw_event(struct hid_device *hdev,

 static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
 {
+	struct input_dev *input = hi->input;
	struct asus_drvdata *drvdata = hid_get_drvdata(hdev);

	if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
		int ret;
-		struct input_dev *input = hi->input;

		input_set_abs_params(input, ABS_MT_POSITION_X, 0, MAX_X, 0, 0);
		input_set_abs_params(input, ABS_MT_POSITION_Y, 0, MAX_Y, 0, 0);
@@ -191,10 +192,10 @@ static int asus_input_configured(struct hid_device *hdev, struct hid_input *hi)
			hid_err(hdev, "Asus input mt init slots failed: %d\n", ret);
			return ret;
		}
-
-		drvdata->input = input;
	}

+	drvdata->input = input;
+
	return 0;
 }

@@ -286,7 +287,11 @@ static int asus_probe(struct hid_device *hdev, const struct hid_device_id *id)
		goto err_stop_hw;
	}

-	drvdata->input->name = "Asus TouchPad";
+	if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
+		drvdata->input->name = "Asus TouchPad";
+	} else {
+		drvdata->input->name = "Asus Keyboard";
+	}

	if (drvdata->quirks & QUIRK_IS_MULTITOUCH) {
		ret = asus_start_multitouch(hdev);
@@ -315,7 +320,7 @@ static __u8 *asus_report_fixup(struct hid_device *hdev, __u8 *rdesc,

 static const struct hid_device_id asus_devices[] = {
	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
-		USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), NOTEBOOK_QUIRKS},
+		USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD), KEYBOARD_QUIRKS},
	{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK,
		USB_DEVICE_ID_ASUSTEK_TOUCHPAD), TOUCHPAD_QUIRKS },
	{ }
@@ -319,6 +319,7 @@
 #define USB_VENDOR_ID_DRAGONRISE		0x0079
 #define USB_DEVICE_ID_DRAGONRISE_WIIU		0x1800
 #define USB_DEVICE_ID_DRAGONRISE_PS3		0x1801
+#define USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR	0x1803
 #define USB_DEVICE_ID_DRAGONRISE_GAMECUBE	0x1843

 #define USB_VENDOR_ID_DWAV		0x0eef
@@ -365,6 +366,9 @@
 #define USB_VENDOR_ID_FLATFROG		0x25b5
 #define USB_DEVICE_ID_MULTITOUCH_3200	0x0002

+#define USB_VENDOR_ID_FUTABA		0x0547
+#define USB_DEVICE_ID_LED_DISPLAY	0x7000
+
 #define USB_VENDOR_ID_ESSENTIAL_REALITY	0x0d7f
 #define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100

@@ -212,7 +212,6 @@ int sensor_hub_set_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
	__s32 value;
	int ret = 0;

-	memset(buffer, 0, buffer_size);
	mutex_lock(&data->mutex);
	report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
	if (!report || (field_index >= report->maxfield)) {
@@ -256,6 +255,8 @@ int sensor_hub_get_feature(struct hid_sensor_hub_device *hsdev, u32 report_id,
	int buffer_index = 0;
	int i;

+	memset(buffer, 0, buffer_size);
+
	mutex_lock(&data->mutex);
	report = sensor_hub_report(report_id, hsdev->hdev, HID_FEATURE_REPORT);
	if (!report || (field_index >= report->maxfield) ||
@@ -1099,8 +1099,11 @@ struct sony_sc {
	u8 led_delay_on[MAX_LEDS];
	u8 led_delay_off[MAX_LEDS];
	u8 led_count;
+	bool ds4_dongle_connected;
 };

+static void sony_set_leds(struct sony_sc *sc);
+
 static inline void sony_schedule_work(struct sony_sc *sc)
 {
	if (!sc->defer_initialization)
@@ -1430,6 +1433,31 @@ static int sony_raw_event(struct hid_device *hdev, struct hid_report *report,
				return -EILSEQ;
			}
		}
+
+		/*
+		 * In the case of a DS4 USB dongle, bit[2] of byte 31 indicates
+		 * if a DS4 is actually connected (indicated by '0').
+		 * For non-dongle, this bit is always 0 (connected).
+		 */
+		if (sc->hdev->vendor == USB_VENDOR_ID_SONY &&
+		    sc->hdev->product == USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) {
+			bool connected = (rd[31] & 0x04) ? false : true;
+
+			if (!sc->ds4_dongle_connected && connected) {
+				hid_info(sc->hdev, "DualShock 4 USB dongle: controller connected\n");
+				sony_set_leds(sc);
+				sc->ds4_dongle_connected = true;
+			} else if (sc->ds4_dongle_connected && !connected) {
+				hid_info(sc->hdev, "DualShock 4 USB dongle: controller disconnected\n");
+				sc->ds4_dongle_connected = false;
+				/* Return 0, so hidraw can get the report. */
+				return 0;
+			} else if (!sc->ds4_dongle_connected) {
+				/* Return 0, so hidraw can get the report. */
+				return 0;
+			}
+		}
+
		dualshock4_parse_report(sc, rd, size);
	}

@@ -2390,6 +2418,12 @@ static int sony_check_add(struct sony_sc *sc)
		}

		memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address));
+
+		snprintf(sc->hdev->uniq, sizeof(sc->hdev->uniq),
+			 "%02hhx:%02hhx:%02hhx:%02hhx:%02hhx:%02hhx",
+			 sc->mac_address[5], sc->mac_address[4],
+			 sc->mac_address[3], sc->mac_address[2],
+			 sc->mac_address[1], sc->mac_address[0]);
	} else if ((sc->quirks & SIXAXIS_CONTROLLER_USB) ||
		   (sc->quirks & NAVIGATION_CONTROLLER_USB)) {
		buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL);
@@ -2548,7 +2582,7 @@ static int sony_input_configured(struct hid_device *hdev,
			hid_err(sc->hdev,
				"Unable to initialize multi-touch slots: %d\n",
				ret);
-			return ret;
+			goto err_stop;
		}

		sony_init_output_report(sc, dualshock4_send_output_report);
@@ -83,11 +83,13 @@ static const struct hid_blacklist {
	{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_WIIU, HID_QUIRK_MULTI_INPUT },
	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3, HID_QUIRK_MULTI_INPUT },
+	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR, HID_QUIRK_MULTI_INPUT },
	{ USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE, HID_QUIRK_MULTI_INPUT },
	{ USB_VENDOR_ID_ELAN, HID_ANY_ID, HID_QUIRK_ALWAYS_POLL },
	{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
	{ USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
	{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
+	{ USB_VENDOR_ID_FUTABA, USB_DEVICE_ID_LED_DISPLAY, HID_QUIRK_NO_INIT_REPORTS },
	{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A, HID_QUIRK_ALWAYS_POLL },
	{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A, HID_QUIRK_ALWAYS_POLL },
	{ USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE, HID_QUIRK_ALWAYS_POLL },
@@ -1682,9 +1682,19 @@ static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_att
		size += ret;
	}

+	if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
+	    flow_attr->num_of_specs == 1) {
+		struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
+		enum ib_flow_spec_type header_spec =
+			((union ib_flow_spec *)(flow_attr + 1))->type;
+
+		if (header_spec == IB_FLOW_SPEC_ETH)
+			mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
+	}
+
	ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
-			   MLX4_CMD_WRAPPED);
+			   MLX4_CMD_NATIVE);
	if (ret == -ENOMEM)
		pr_err("mcg table is full. Fail to register network rule.\n");
	else if (ret == -ENXIO)
@@ -1701,7 +1711,7 @@ static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id)
	int err;
	err = mlx4_cmd(dev, reg_id, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
-		       MLX4_CMD_WRAPPED);
+		       MLX4_CMD_NATIVE);
	if (err)
		pr_err("Fail to detach network rule. registration id = 0x%llx\n",
		       reg_id);
@@ -1012,6 +1012,18 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
		goto out;
	}

+	/* The Ethernet switch we are interfaced with needs packets to be at
+	 * least 64 bytes (including FCS) otherwise they will be discarded when
+	 * they enter the switch port logic. When Broadcom tags are enabled, we
+	 * need to make sure that packets are at least 68 bytes
+	 * (including FCS and tag) because the length verification is done after
+	 * the Broadcom tag is stripped off the ingress packet.
+	 */
+	if (skb_put_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+		ret = NETDEV_TX_OK;
+		goto out;
+	}
+
	/* Insert TSB and checksum infos */
	if (priv->tsb_en) {
		skb = bcm_sysport_insert_tsb(skb, dev);
@@ -1021,20 +1033,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
		}
	}

-	/* The Ethernet switch we are interfaced with needs packets to be at
-	 * least 64 bytes (including FCS) otherwise they will be discarded when
-	 * they enter the switch port logic. When Broadcom tags are enabled, we
-	 * need to make sure that packets are at least 68 bytes
-	 * (including FCS and tag) because the length verification is done after
-	 * the Broadcom tag is stripped off the ingress packet.
-	 */
-	if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
-		ret = NETDEV_TX_OK;
-		goto out;
-	}
-
-	skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
-			ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+	skb_len = skb->len;

	mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
	if (dma_mapping_error(kdev, mapping)) {
@@ -1,5 +1,5 @@
 /**
- * macb_pci.c - Cadence GEM PCI wrapper.
+ * Cadence GEM PCI wrapper.
 *
 * Copyright (C) 2016 Cadence Design Systems - http://www.cadence.com
 *
@@ -45,32 +45,27 @@ static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	struct macb_platform_data plat_data;
	struct resource res[2];

-	/* sanity check */
-	if (!id)
-		return -EINVAL;
-
	/* enable pci device */
-	err = pci_enable_device(pdev);
+	err = pcim_enable_device(pdev);
	if (err < 0) {
-		dev_err(&pdev->dev, "Enabling PCI device has failed: 0x%04X",
-			err);
-		return -EACCES;
+		dev_err(&pdev->dev, "Enabling PCI device has failed: %d", err);
+		return err;
	}

	pci_set_master(pdev);

	/* set up resources */
	memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
-	res[0].start = pdev->resource[0].start;
-	res[0].end = pdev->resource[0].end;
+	res[0].start = pci_resource_start(pdev, 0);
+	res[0].end = pci_resource_end(pdev, 0);
	res[0].name = PCI_DRIVER_NAME;
	res[0].flags = IORESOURCE_MEM;
-	res[1].start = pdev->irq;
+	res[1].start = pci_irq_vector(pdev, 0);
	res[1].name = PCI_DRIVER_NAME;
	res[1].flags = IORESOURCE_IRQ;

-	dev_info(&pdev->dev, "EMAC physical base addr = 0x%p\n",
-		 (void *)(uintptr_t)pci_resource_start(pdev, 0));
+	dev_info(&pdev->dev, "EMAC physical base addr: %pa\n",
+		 &res[0].start);

	/* set up macb platform data */
	memset(&plat_data, 0, sizeof(plat_data));
@@ -100,7 +95,7 @@ static int macb_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	plat_info.num_res = ARRAY_SIZE(res);
	plat_info.data = &plat_data;
	plat_info.size_data = sizeof(plat_data);
-	plat_info.dma_mask = DMA_BIT_MASK(32);
+	plat_info.dma_mask = pdev->dma_mask;

	/* register platform device */
	plat_dev = platform_device_register_full(&plat_info);
@@ -120,7 +115,6 @@ err_hclk_register:
	clk_unregister(plat_data.pclk);

 err_pclk_register:
-	pci_disable_device(pdev);
	return err;
 }

@@ -130,7 +124,6 @@ static void macb_remove(struct pci_dev *pdev)
	struct macb_platform_data *plat_data = dev_get_platdata(&plat_dev->dev);

	platform_device_unregister(plat_dev);
-	pci_disable_device(pdev);
	clk_unregister(plat_data->pclk);
	clk_unregister(plat_data->hclk);
 }
@@ -77,7 +77,7 @@ config OCTEON_MGMT_ETHERNET
 config LIQUIDIO_VF
	tristate "Cavium LiquidIO VF support"
	depends on 64BIT && PCI_MSI
-	select PTP_1588_CLOCK
+	imply PTP_1588_CLOCK
	---help---
	  This driver supports Cavium LiquidIO Intelligent Server Adapter
	  based on CN23XX chips.
@@ -133,17 +133,15 @@ cxgb_find_route6(struct cxgb4_lld_info *lldi,
		if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
			fl6.flowi6_oif = sin6_scope_id;
		dst = ip6_route_output(&init_net, NULL, &fl6);
-		if (!dst)
-			goto out;
-		if (!cxgb_our_interface(lldi, get_real_dev,
-					ip6_dst_idev(dst)->dev) &&
-		    !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) {
+		if (dst->error ||
+		    (!cxgb_our_interface(lldi, get_real_dev,
+					 ip6_dst_idev(dst)->dev) &&
+		     !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK))) {
			dst_release(dst);
-			dst = NULL;
+			return NULL;
		}
	}

-out:
	return dst;
 }
 EXPORT_SYMBOL(cxgb_find_route6);
@@ -5155,7 +5155,9 @@ static netdev_features_t be_features_check(struct sk_buff *skb,
	    skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
	    skb->inner_protocol != htons(ETH_P_TEB) ||
	    skb_inner_mac_header(skb) - skb_transport_header(skb) !=
-	    sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+	    sizeof(struct udphdr) + sizeof(struct vxlanhdr) ||
+	    !adapter->vxlan_port ||
+	    udp_hdr(skb)->dest != adapter->vxlan_port)
		return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
@@ -733,6 +733,7 @@ static int dpaa_eth_cgr_init(struct dpaa_priv *priv)
	priv->cgr_data.cgr.cb = dpaa_eth_cgscn;

	/* Enable Congestion State Change Notifications and CS taildrop */
+	memset(&initcgr, 0, sizeof(initcgr));
	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CSCN_EN | QM_CGR_WE_CS_THRES);
	initcgr.cgr.cscn_en = QM_CGR_EN;

@@ -2291,7 +2292,8 @@ static int dpaa_open(struct net_device *net_dev)
	net_dev->phydev = mac_dev->init_phy(net_dev, priv->mac_dev);
	if (!net_dev->phydev) {
		netif_err(priv, ifup, net_dev, "init_phy() failed\n");
-		return -ENODEV;
+		err = -ENODEV;
+		goto phy_init_failed;
	}

	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
@@ -2314,6 +2316,7 @@ mac_start_failed:
	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++)
		fman_port_disable(mac_dev->port[i]);

+phy_init_failed:
	dpaa_eth_napi_disable(priv);

	return err;
@@ -2420,6 +2423,7 @@ static int dpaa_ingress_cgr_init(struct dpaa_priv *priv)
	}

	/* Enable CS TD, but disable Congestion State Change Notifications. */
+	memset(&initcgr, 0, sizeof(initcgr));
	initcgr.we_mask = cpu_to_be16(QM_CGR_WE_CS_THRES);
	initcgr.cgr.cscn_en = QM_CGR_EN;
	cs_th = DPAA_INGRESS_CS_THRESHOLD;
@@ -245,13 +245,9 @@ static u32 freq_to_shift(u16 freq)
 {
	u32 freq_khz = freq * 1000;
	u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
-	u64 tmp_rounded =
-		roundup_pow_of_two(max_val_cycles) > max_val_cycles ?
-		roundup_pow_of_two(max_val_cycles) - 1 : UINT_MAX;
-	u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
-		max_val_cycles : tmp_rounded;
+	u64 max_val_cycles_rounded = 1ULL << fls64(max_val_cycles - 1);
	/* calculate max possible multiplier in order to fit in 64bit */
-	u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
+	u64 max_mul = div64_u64(ULLONG_MAX, max_val_cycles_rounded);

	/* This comes from the reverse of clocksource_khz2mult */
	return ilog2(div_u64(max_mul * freq_khz, 1000000));
@@ -445,8 +445,14 @@ int mlx4_en_activate_rx_rings(struct mlx4_en_priv *priv)
		ring->cqn = priv->rx_cq[ring_ind]->mcq.cqn;

		ring->stride = stride;
-		if (ring->stride <= TXBB_SIZE)
+		if (ring->stride <= TXBB_SIZE) {
+			/* Stamp first unused send wqe */
+			__be32 *ptr = (__be32 *)ring->buf;
+			__be32 stamp = cpu_to_be32(1 << STAMP_SHIFT);
+			*ptr = stamp;
+			/* Move pointer to start of rx section */
			ring->buf += TXBB_SIZE;
+		}

		ring->log_stride = ffs(ring->stride) - 1;
		ring->buf_size = ring->size * ring->stride;
@@ -118,8 +118,13 @@ static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
	if (!buf)
		return -ENOMEM;

+	if (offset_in_page(buf)) {
+		dma_free_coherent(dev, PAGE_SIZE << order,
+				  buf, sg_dma_address(mem));
+		return -ENOMEM;
+	}
+
	sg_set_buf(mem, buf, PAGE_SIZE << order);
-	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
 }
@@ -42,6 +42,7 @@
 #include <linux/io-mapping.h>
 #include <linux/delay.h>
 #include <linux/kmod.h>
+#include <linux/etherdevice.h>
 #include <net/devlink.h>

 #include <linux/mlx4/device.h>
@@ -782,6 +783,23 @@ int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
 }
 EXPORT_SYMBOL(mlx4_is_slave_active);

+void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+				       struct _rule_hw *eth_header)
+{
+	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
+	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
+		struct mlx4_net_trans_rule_hw_eth *eth =
+			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
+		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
+		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
+			next_rule->rsvd == 0;
+
+		if (last_rule)
+			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
+	}
+}
+EXPORT_SYMBOL(mlx4_handle_eth_header_mcast_prio);
+
 static void slave_adjust_steering_mode(struct mlx4_dev *dev,
				       struct mlx4_dev_cap *dev_cap,
				       struct mlx4_init_hca_param *hca_param)
@@ -4164,22 +4164,6 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
	return 0;
 }

-static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
-					 struct _rule_hw *eth_header)
-{
-	if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
-	    is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
-		struct mlx4_net_trans_rule_hw_eth *eth =
-			(struct mlx4_net_trans_rule_hw_eth *)eth_header;
-		struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
-		bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
-			next_rule->rsvd == 0;
-
-		if (last_rule)
-			ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
-	}
-}
-
 /*
  * In case of missing eth header, append eth header with a MAC address
  * assigned to the VF.
@@ -4363,10 +4347,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
	header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));

-	if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
-		handle_eth_header_mcast_prio(ctrl, rule_header);
-
-	if (slave == dev->caps.function)
-		goto execute;
+	if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
+		mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);

	switch (header_id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
@@ -4394,7 +4375,6 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
		goto err_put_qp;
	}

-execute:
	err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
			   vhcr->in_modifier, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
@@ -4473,6 +4453,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
	struct res_qp *rqp;
	struct res_fs_rule *rrule;
	u64 mirr_reg_id;
+	int qpn;

	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
@@ -4489,10 +4470,11 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
	}
	mirr_reg_id = rrule->mirr_rule_id;
	kfree(rrule->mirr_mbox);
+	qpn = rrule->qpn;

	/* Release the rule form busy state before removal */
	put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
-	err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
+	err = get_res(dev, slave, qpn, RES_QP, &rqp);
	if (err)
		return err;

@@ -4517,7 +4499,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
	if (!err)
		atomic_dec(&rqp->ref_count);
 out:
-	put_res(dev, slave, rrule->qpn, RES_QP);
+	put_res(dev, slave, qpn, RES_QP);
	return err;
 }

@@ -723,6 +723,9 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
	int i;
	struct ieee_ets ets;

+	if (!MLX5_CAP_GEN(priv->mdev, ets))
+		return;
+
	memset(&ets, 0, sizeof(ets));
	ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < ets.ets_cap; i++) {
@@ -171,7 +171,6 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
		return NUM_SW_COUNTERS +
		       MLX5E_NUM_Q_CNTRS(priv) +
		       NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
-		       NUM_PCIE_COUNTERS +
		       MLX5E_NUM_RQ_STATS(priv) +
		       MLX5E_NUM_SQ_STATS(priv) +
		       MLX5E_NUM_PFC_COUNTERS(priv) +
@@ -219,14 +218,6 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
		strcpy(data + (idx++) * ETH_GSTRING_LEN,
		       pport_2819_stats_desc[i].format);

-	for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pcie_perf_stats_desc[i].format);
-
-	for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
-		strcpy(data + (idx++) * ETH_GSTRING_LEN,
-		       pcie_tas_stats_desc[i].format);
-
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			sprintf(data + (idx++) * ETH_GSTRING_LEN,
@@ -339,14 +330,6 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
		data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
						  pport_2819_stats_desc, i);

-	for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
-						  pcie_perf_stats_desc, i);
-
-	for (i = 0; i < NUM_PCIE_TAS_COUNTERS; i++)
-		data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_tas_counters,
-						  pcie_tas_stats_desc, i);
-
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
			data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
@@ -247,6 +247,7 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
	}
	if (fs->flow_type & FLOW_MAC_EXT &&
	    !is_zero_ether_addr(fs->m_ext.h_dest)) {
+		mask_spec(fs->m_ext.h_dest, fs->h_ext.h_dest, ETH_ALEN);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4,
					     outer_headers_c, dmac_47_16),
				fs->m_ext.h_dest);
@@ -291,36 +291,12 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
				      &qcnt->rx_out_of_buffer);
 }

-static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
-{
-	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
-	struct mlx5_core_dev *mdev = priv->mdev;
-	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
-	void *out;
-	u32 *in;
-
-	in = mlx5_vzalloc(sz);
-	if (!in)
-		return;
-
-	out = pcie_stats->pcie_perf_counters;
-	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
-	out = pcie_stats->pcie_tas_counters;
-	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP);
-	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
-
-	kvfree(in);
-}
-
 void mlx5e_update_stats(struct mlx5e_priv *priv)
 {
	mlx5e_update_q_counter(priv);
	mlx5e_update_vport_counters(priv);
	mlx5e_update_pport_counters(priv);
	mlx5e_update_sw_counters(priv);
-	mlx5e_update_pcie_counters(priv);
 }

 void mlx5e_update_stats_work(struct work_struct *work)
@@ -3805,14 +3781,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)

	mlx5_lag_add(mdev, netdev);

-	if (mlx5e_vxlan_allowed(mdev)) {
-		rtnl_lock();
-		udp_tunnel_get_rx_info(netdev);
-		rtnl_unlock();
-	}
-
	mlx5e_enable_async_events(priv);
-	queue_work(priv->wq, &priv->set_rx_mode_work);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
@@ -3822,6 +3791,18 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
		rep.netdev = netdev;
		mlx5_eswitch_register_vport_rep(esw, 0, &rep);
	}
+
+	if (netdev->reg_state != NETREG_REGISTERED)
+		return;
+
+	/* Device already registered: sync netdev system state */
+	if (mlx5e_vxlan_allowed(mdev)) {
+		rtnl_lock();
+		udp_tunnel_get_rx_info(netdev);
+		rtnl_unlock();
+	}
+
+	queue_work(priv->wq, &priv->set_rx_mode_work);
 }

 static void mlx5e_nic_disable(struct mlx5e_priv *priv)
@@ -3966,10 +3947,6 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);
-	if (profile->disable)
-		profile->disable(priv);
-
-	flush_workqueue(priv->wq);

	rtnl_lock();
	if (netif_running(netdev))
@@ -3977,6 +3954,10 @@ void mlx5e_detach_netdev(struct mlx5_core_dev *mdev, struct net_device *netdev)
	netif_device_detach(netdev);
	rtnl_unlock();

+	if (profile->disable)
+		profile->disable(priv);
+	flush_workqueue(priv->wq);
+
	mlx5e_destroy_q_counter(priv);
	profile->cleanup_rx(priv);
	mlx5e_close_drop_rq(priv);
@@ -39,7 +39,7 @@
 #define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
	(*(u32 *)((char *)ptr + dsc[i].offset))
 #define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
-	be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+	be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))

 #define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
 #define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
@@ -276,32 +276,6 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
	{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
 };

-#define PCIE_PERF_OFF(c) \
-	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_PERF_GET(pcie_stats, c) \
-	MLX5_GET(mpcnt_reg, pcie_stats->pcie_perf_counters, \
-		 counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_TAS_OFF(c) \
-	MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_tas_cntrs_grp_data_layout.c)
-#define PCIE_TAS_GET(pcie_stats, c) \
-	MLX5_GET(mpcnt_reg, pcie_stats->pcie_tas_counters, \
-		 counter_set.pcie_tas_cntrs_grp_data_layout.c)
-
-struct mlx5e_pcie_stats {
-	__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
-	__be64 pcie_tas_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
-};
-
-static const struct counter_desc pcie_perf_stats_desc[] = {
-	{ "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
-	{ "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
-};
-
-static const struct counter_desc pcie_tas_stats_desc[] = {
-	{ "tx_pci_transport_nonfatal_msg", PCIE_TAS_OFF(non_fatal_err_msg_sent) },
-	{ "tx_pci_transport_fatal_msg", PCIE_TAS_OFF(fatal_err_msg_sent) },
-};
-
 struct mlx5e_rq_stats {
	u64 packets;
	u64 bytes;
@@ -386,8 +360,6 @@ static const struct counter_desc sq_stats_desc[] = {
 #define NUM_PPORT_802_3_COUNTERS	ARRAY_SIZE(pport_802_3_stats_desc)
 #define NUM_PPORT_2863_COUNTERS		ARRAY_SIZE(pport_2863_stats_desc)
 #define NUM_PPORT_2819_COUNTERS		ARRAY_SIZE(pport_2819_stats_desc)
-#define NUM_PCIE_PERF_COUNTERS		ARRAY_SIZE(pcie_perf_stats_desc)
-#define NUM_PCIE_TAS_COUNTERS		ARRAY_SIZE(pcie_tas_stats_desc)
 #define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
	ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
 #define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
@@ -397,7 +369,6 @@ static const struct counter_desc sq_stats_desc[] = {
	 NUM_PPORT_2819_COUNTERS + \
	 NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
	 NUM_PPORT_PRIO)
-#define NUM_PCIE_COUNTERS	(NUM_PCIE_PERF_COUNTERS + NUM_PCIE_TAS_COUNTERS)
 #define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
 #define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)

@@ -406,7 +377,6 @@ struct mlx5e_stats {
	struct mlx5e_qcounter_stats qcnt;
	struct mlx5e_vport_stats vport;
	struct mlx5e_pport_stats pport;
-	struct mlx5e_pcie_stats pcie;
	struct rtnl_link_stats64 vf_vport;
 };

@@ -1860,7 +1860,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,

	if (!ESW_ALLOWED(esw))
		return -EPERM;
-	if (!LEGAL_VPORT(esw, vport))
+	if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
		return -EINVAL;

	mutex_lock(&esw->state_lock);
@@ -695,6 +695,12 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
		if (err)
			goto err_reps;
	}
+
+	/* disable PF RoCE so missed packets don't go through RoCE steering */
+	mlx5_dev_list_lock();
+	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
	return 0;

 err_reps:
@@ -718,6 +724,11 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw)
 {
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

+	/* enable back PF RoCE */
+	mlx5_dev_list_lock();
+	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
+	mlx5_dev_list_unlock();
+
	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
@@ -1263,6 +1263,7 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
	nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
	handle = add_rule_fte(fte, fg, dest, dest_num, false);
	if (IS_ERR(handle)) {
+		unlock_ref_node(&fte->node);
		kfree(fte);
		goto unlock_fg;
	}
@@ -523,6 +523,13 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(dev, 128));

+	/* Check log_max_qp from HCA caps to set in current profile */
+	if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < profile[prof_sel].log_max_qp) {
+		mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
+			       profile[prof_sel].log_max_qp,
+			       MLX5_CAP_GEN_MAX(dev, log_max_qp));
+		profile[prof_sel].log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
+	}
	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);
@@ -595,7 +602,6 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
	struct mlx5_priv *priv = &mdev->priv;
	struct msix_entry *msix = priv->msix_arr;
	int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
-	int numa_node = priv->numa_node;
	int err;

	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
@@ -603,7 +609,7 @@ static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
		return -ENOMEM;
	}

-	cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+	cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node),
			priv->irq_info[i].mask);

	err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
@@ -1210,6 +1216,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 {
	int err = 0;

+	mlx5_drain_health_wq(dev);
+
	mutex_lock(&dev->intf_state_mutex);
	if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
@@ -1388,10 +1396,9 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,

	mlx5_enter_error_state(dev);
	mlx5_unload_one(dev, priv, false);
-	/* In case of kernel call save the pci state and drain health wq */
+	/* In case of kernel call save the pci state */
	if (state) {
		pci_save_state(pdev);
-		mlx5_drain_health_wq(dev);
		mlx5_pci_disable_device(dev);
	}

@@ -815,6 +815,7 @@ static struct sh_eth_cpu_data sh7734_data = {
	.tsu		= 1,
	.hw_crc		= 1,
	.select_mii	= 1,
+	.shift_rd0	= 1,
 };

 /* SH7763 */
@@ -1653,7 +1654,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
	else
		goto out;

-	if (!likely(mdp->irq_enabled)) {
+	if (unlikely(!mdp->irq_enabled)) {
		sh_eth_write(ndev, 0, EESIPR);
		goto out;
	}
@@ -1323,7 +1323,8 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
	}

	/* don't fail init if RSS setup doesn't work */
-	efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+	rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+	efx->rss_active = (rc == 0);

	return 0;
 }
@@ -975,6 +975,8 @@ efx_ethtool_get_rxnfc(struct net_device *net_dev,

	case ETHTOOL_GRXFH: {
		info->data = 0;
+		if (!efx->rss_active) /* No RSS */
+			return 0;
		switch (info->flow_type) {
		case UDP_V4_FLOW:
			if (efx->rx_hash_udp_4tuple)
@@ -860,6 +860,7 @@ struct vfdi_status;
 * @rx_hash_key: Toeplitz hash key for RSS
 * @rx_indir_table: Indirection table for RSS
 * @rx_scatter: Scatter mode enabled for receives
+ * @rss_active: RSS enabled on hardware
 * @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled
 * @int_error_count: Number of internal errors seen recently
 * @int_error_expire: Time at which error count will be expired
@@ -998,6 +999,7 @@ struct efx_nic {
	u8 rx_hash_key[40];
	u32 rx_indir_table[128];
	bool rx_scatter;
+	bool rss_active;
	bool rx_hash_udp_4tuple;

	unsigned int_error_count;
@@ -403,6 +403,7 @@ static int siena_init_nic(struct efx_nic *efx)
	efx_writeo(efx, &temp, FR_AZ_RX_CFG);

	siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
+	efx->rss_active = true;

	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
@@ -60,8 +60,9 @@ struct oxnas_dwmac {
	struct regmap	*regmap;
 };

-static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac)
+static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
 {
+	struct oxnas_dwmac *dwmac = priv;
	unsigned int value;
	int ret;

@@ -105,20 +106,20 @@ static int oxnas_dwmac_init(struct oxnas_dwmac *dwmac)
	return 0;
 }

+static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+	struct oxnas_dwmac *dwmac = priv;
+
+	clk_disable_unprepare(dwmac->clk);
+}
+
 static int oxnas_dwmac_probe(struct platform_device *pdev)
 {
	struct plat_stmmacenet_data *plat_dat;
	struct stmmac_resources stmmac_res;
-	struct device_node *sysctrl;
	struct oxnas_dwmac *dwmac;
	int ret;

-	sysctrl = of_parse_phandle(pdev->dev.of_node, "oxsemi,sys-ctrl", 0);
-	if (!sysctrl) {
-		dev_err(&pdev->dev, "failed to get sys-ctrl node\n");
-		return -EINVAL;
-	}
-
	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
	if (ret)
		return ret;
@@ -128,73 +129,49 @@ static int oxnas_dwmac_probe(struct platform_device *pdev)
		return PTR_ERR(plat_dat);

	dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
-	if (!dwmac)
-		return -ENOMEM;
+	if (!dwmac) {
+		ret = -ENOMEM;
+		goto err_remove_config_dt;
+	}

	dwmac->dev = &pdev->dev;
	plat_dat->bsp_priv = dwmac;
+	plat_dat->init = oxnas_dwmac_init;
+	plat_dat->exit = oxnas_dwmac_exit;

-	dwmac->regmap = syscon_node_to_regmap(sysctrl);
+	dwmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+							"oxsemi,sys-ctrl");
	if (IS_ERR(dwmac->regmap)) {
		dev_err(&pdev->dev, "failed to have sysctrl regmap\n");
-		return PTR_ERR(dwmac->regmap);
+		ret = PTR_ERR(dwmac->regmap);
+		goto err_remove_config_dt;
	}

	dwmac->clk = devm_clk_get(&pdev->dev, "gmac");
-	if (IS_ERR(dwmac->clk))
-		return PTR_ERR(dwmac->clk);
+	if (IS_ERR(dwmac->clk)) {
+		ret = PTR_ERR(dwmac->clk);
+		goto err_remove_config_dt;
+	}

-	ret = oxnas_dwmac_init(dwmac);
+	ret = oxnas_dwmac_init(pdev, plat_dat->bsp_priv);
	if (ret)
-		return ret;
+		goto err_remove_config_dt;

	ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
	if (ret)
-		clk_disable_unprepare(dwmac->clk);
+		goto err_dwmac_exit;
+
+	return 0;
+
+err_dwmac_exit:
+	oxnas_dwmac_exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+	stmmac_remove_config_dt(pdev, plat_dat);

	return ret;
 }

-static int oxnas_dwmac_remove(struct platform_device *pdev)
-{
-	struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
-	int ret = stmmac_dvr_remove(&pdev->dev);
-
-	clk_disable_unprepare(dwmac->clk);
-
-	return ret;
-}
-
-#ifdef CONFIG_PM_SLEEP
-static int oxnas_dwmac_suspend(struct device *dev)
-{
-	struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
-	int ret;
-
-	ret = stmmac_suspend(dev);
-	clk_disable_unprepare(dwmac->clk);
-
-	return ret;
-}
-
-static int oxnas_dwmac_resume(struct device *dev)
-{
-	struct oxnas_dwmac *dwmac = get_stmmac_bsp_priv(dev);
-	int ret;
-
-	ret = oxnas_dwmac_init(dwmac);
-	if (ret)
-		return ret;
-
-	ret = stmmac_resume(dev);
-
-	return ret;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static SIMPLE_DEV_PM_OPS(oxnas_dwmac_pm_ops,
-	oxnas_dwmac_suspend, oxnas_dwmac_resume);
-
 static const struct of_device_id oxnas_dwmac_match[] = {
	{ .compatible = "oxsemi,ox820-dwmac" },
	{ }
@ -203,10 +180,10 @@ MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
|
|||
|
||||
static struct platform_driver oxnas_dwmac_driver = {
|
||||
.probe = oxnas_dwmac_probe,
|
||||
.remove = oxnas_dwmac_remove,
|
||||
.remove = stmmac_pltfr_remove,
|
||||
.driver = {
|
||||
.name = "oxnas-dwmac",
|
||||
.pm = &oxnas_dwmac_pm_ops,
|
||||
.pm = &stmmac_pltfr_pm_ops,
|
||||
.of_match_table = oxnas_dwmac_match,
|
||||
},
|
||||
};
|
||||
|
|
|
@@ -3365,13 +3365,6 @@ int stmmac_dvr_probe(struct device *device,

spin_lock_init(&priv->lock);

ret = register_netdev(ndev);
if (ret) {
netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
__func__, ret);
goto error_netdev_register;
}

/* If a specific clk_csr value is passed from the platform
* this means that the CSR Clock Range selection cannot be
* changed at run-time and it is fixed. Viceversa the driver'll try to

@@ -3398,11 +3391,21 @@ int stmmac_dvr_probe(struct device *device,
}
}

return 0;
ret = register_netdev(ndev);
if (ret) {
netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
__func__, ret);
goto error_netdev_register;
}

return ret;

error_mdio_register:
unregister_netdev(ndev);
error_netdev_register:
if (priv->hw->pcs != STMMAC_PCS_RGMII &&
priv->hw->pcs != STMMAC_PCS_TBI &&
priv->hw->pcs != STMMAC_PCS_RTBI)
stmmac_mdio_unregister(ndev);
error_mdio_register:
netif_napi_del(&priv->napi);
error_hw_init:
clk_disable_unprepare(priv->pclk);

@@ -1367,6 +1367,7 @@ static struct usb_driver asix_driver = {
.probe = usbnet_probe,
.suspend = asix_suspend,
.resume = asix_resume,
.reset_resume = asix_resume,
.disconnect = usbnet_disconnect,
.supports_autosuspend = 1,
.disable_hub_initiated_lpm = 1,

@@ -967,6 +967,7 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
*/
need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
if (!ipv6_ndisc_frame(skb) && !need_strict) {
vrf_rx_stats(vrf_dev, skb->len);
skb->dev = vrf_dev;
skb->skb_iif = vrf_dev->ifindex;

@@ -1011,6 +1012,8 @@ static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
goto out;
}

vrf_rx_stats(vrf_dev, skb->len);

skb_push(skb, skb->mac_len);
dev_queue_xmit_nit(skb, vrf_dev);
skb_pull(skb, skb->mac_len);

@@ -218,7 +218,7 @@ static int slic_ds26522_probe(struct spi_device *spi)

ret = slic_ds26522_init_configure(spi);
if (ret == 0)
pr_info("DS26522 cs%d configurated\n", spi->chip_select);
pr_info("DS26522 cs%d configured\n", spi->chip_select);

return ret;
}

@@ -1193,8 +1193,8 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
if (ctrl->stripe_size)
blk_queue_chunk_sectors(q, ctrl->stripe_size >> 9);
if (ctrl->quirks & NVME_QUIRK_STRIPE_SIZE)
blk_queue_chunk_sectors(q, ctrl->max_hw_sectors);
blk_queue_virt_boundary(q, ctrl->page_size - 1);
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
vwc = true;

@@ -1250,19 +1250,6 @@ int nvme_init_identify(struct nvme_ctrl *ctrl)
ctrl->max_hw_sectors =
min_not_zero(ctrl->max_hw_sectors, max_hw_sectors);

if ((ctrl->quirks & NVME_QUIRK_STRIPE_SIZE) && id->vs[3]) {
unsigned int max_hw_sectors;

ctrl->stripe_size = 1 << (id->vs[3] + page_shift);
max_hw_sectors = ctrl->stripe_size >> (page_shift - 9);
if (ctrl->max_hw_sectors) {
ctrl->max_hw_sectors = min(max_hw_sectors,
ctrl->max_hw_sectors);
} else {
ctrl->max_hw_sectors = max_hw_sectors;
}
}

nvme_set_queue_limits(ctrl, ctrl->admin_q);
ctrl->sgls = le32_to_cpu(id->sgls);
ctrl->kas = le16_to_cpu(id->kas);

@@ -1491,19 +1491,20 @@ static int
nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize)
{
struct nvme_fc_queue *queue = &ctrl->queues[1];
int i, j, ret;
int i, ret;

for (i = 1; i < ctrl->queue_count; i++, queue++) {
ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize);
if (ret) {
for (j = i-1; j >= 0; j--)
__nvme_fc_delete_hw_queue(ctrl,
&ctrl->queues[j], j);
return ret;
}
if (ret)
goto delete_queues;
}

return 0;

delete_queues:
for (; i >= 0; i--)
__nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i);
return ret;
}

static int

@@ -2401,8 +2402,8 @@ __nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
WARN_ON_ONCE(!changed);

dev_info(ctrl->ctrl.device,
"NVME-FC{%d}: new ctrl: NQN \"%s\" (%p)\n",
ctrl->cnum, ctrl->ctrl.opts->subsysnqn, &ctrl);
"NVME-FC{%d}: new ctrl: NQN \"%s\"\n",
ctrl->cnum, ctrl->ctrl.opts->subsysnqn);

kref_get(&ctrl->ctrl.kref);

@@ -135,7 +135,6 @@ struct nvme_ctrl {

u32 page_size;
u32 max_hw_sectors;
u32 stripe_size;
u16 oncs;
u16 vid;
atomic_t abort_limit;

@@ -712,15 +712,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
req = blk_mq_tag_to_rq(*nvmeq->tags, cqe.command_id);
nvme_req(req)->result = cqe.result;
blk_mq_complete_request(req, le16_to_cpu(cqe.status) >> 1);
}

/* If the controller ignores the cq head doorbell and continuously
* writes to the queue, it is theoretically possible to wrap around
* the queue twice and mistakenly return IRQ_NONE. Linux only
* requires that 0.1% of your interrupts are handled, so this isn't
* a big problem.
*/
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
return;

@@ -1909,10 +1902,10 @@ static int nvme_dev_map(struct nvme_dev *dev)
if (!dev->bar)
goto release;

return 0;
return 0;
release:
pci_release_mem_regions(pdev);
return -ENODEV;
pci_release_mem_regions(pdev);
return -ENODEV;
}

static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)

@@ -2160,30 +2160,6 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
return nvme_trans_status_code(hdr, nvme_sc);
}

static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *cmd)
{
u8 immed, no_flush;

immed = cmd[1] & 0x01;
no_flush = cmd[4] & 0x04;

if (immed != 0) {
return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
} else {
if (no_flush == 0) {
/* Issue NVME FLUSH command prior to START STOP UNIT */
int res = nvme_trans_synchronize_cache(ns, hdr);
if (res)
return res;
}

return 0;
}
}

static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *cmd)
{

@@ -2439,9 +2415,6 @@ static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
case SECURITY_PROTOCOL_OUT:
retcode = nvme_trans_security_protocol(ns, hdr, cmd);
break;
case START_STOP:
retcode = nvme_trans_start_stop(ns, hdr, cmd);
break;
case SYNCHRONIZE_CACHE:
retcode = nvme_trans_synchronize_cache(ns, hdr);
break;

@@ -382,7 +382,6 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
{
struct nvmet_subsys *subsys = req->sq->ctrl->subsys;
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10[0]);
u64 val;
u32 val32;
u16 status = 0;

@@ -392,8 +391,7 @@ static void nvmet_execute_set_features(struct nvmet_req *req)
(subsys->max_qid - 1) | ((subsys->max_qid - 1) << 16));
break;
case NVME_FEAT_KATO:
val = le64_to_cpu(req->cmd->prop_set.value);
val32 = val & 0xffff;
val32 = le32_to_cpu(req->cmd->common.cdw10[1]);
req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
nvmet_set_result(req, req->sq->ctrl->kato);
break;

@@ -845,7 +845,7 @@ fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
rport->lport = nport->lport;
nport->rport = rport;

return ret ? ret : count;
return count;
}

@@ -952,7 +952,7 @@ fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
tport->lport = nport->lport;
nport->tport = tport;

return ret ? ret : count;
return count;
}

@@ -308,6 +308,11 @@ static int cobalt_lcdfb_probe(struct platform_device *dev)
info->screen_size = resource_size(res);
info->screen_base = devm_ioremap(&dev->dev, res->start,
info->screen_size);
if (!info->screen_base) {
framebuffer_release(info);
return -ENOMEM;
}

info->fbops = &cobalt_lcd_fbops;
info->fix = cobalt_lcdfb_fix;
info->fix.smem_start = res->start;

@@ -328,6 +328,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
struct file *file = iocb->ki_filp;
struct inode *inode = bdev_file_inode(file);
struct block_device *bdev = I_BDEV(inode);
struct blk_plug plug;
struct blkdev_dio *dio;
struct bio *bio;
bool is_read = (iov_iter_rw(iter) == READ);

@@ -353,6 +354,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
dio->multi_bio = false;
dio->should_dirty = is_read && (iter->type == ITER_IOVEC);

blk_start_plug(&plug);
for (;;) {
bio->bi_bdev = bdev;
bio->bi_iter.bi_sector = pos >> 9;

@@ -394,6 +396,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
submit_bio(bio);
bio = bio_alloc(GFP_KERNEL, nr_pages);
}
blk_finish_plug(&plug);

if (!dio->is_sync)
return -EIOCBQUEUED;

@@ -1660,7 +1660,7 @@ void clean_bdev_aliases(struct block_device *bdev, sector_t block, sector_t len)
head = page_buffers(page);
bh = head;
do {
if (!buffer_mapped(bh))
if (!buffer_mapped(bh) || (bh->b_blocknr < block))
goto next;
if (bh->b_blocknr >= block + len)
break;

@@ -248,7 +248,8 @@ retry:
goto out;

if (fscrypt_dummy_context_enabled(inode)) {
memset(raw_key, 0x42, FS_AES_256_XTS_KEY_SIZE);
memset(raw_key, 0x42, keysize/2);
memset(raw_key+keysize/2, 0x24, keysize - (keysize/2));
goto got_key;
}

@@ -179,6 +179,11 @@ int fscrypt_has_permitted_context(struct inode *parent, struct inode *child)
BUG_ON(1);
}

/* No restrictions on file types which are never encrypted */
if (!S_ISREG(child->i_mode) && !S_ISDIR(child->i_mode) &&
!S_ISLNK(child->i_mode))
return 1;

/* no restrictions if the parent directory is not encrypted */
if (!parent->i_sb->s_cop->is_encrypted(parent))
return 1;

fs/dax.c

@@ -451,16 +451,37 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
__wake_up(wq, TASK_NORMAL, wake_all ? 0 : 1, &key);
}

static int __dax_invalidate_mapping_entry(struct address_space *mapping,
pgoff_t index, bool trunc)
{
int ret = 0;
void *entry;
struct radix_tree_root *page_tree = &mapping->page_tree;

spin_lock_irq(&mapping->tree_lock);
entry = get_unlocked_mapping_entry(mapping, index, NULL);
if (!entry || !radix_tree_exceptional_entry(entry))
goto out;
if (!trunc &&
(radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE)))
goto out;
radix_tree_delete(page_tree, index);
mapping->nrexceptional--;
ret = 1;
out:
put_unlocked_mapping_entry(mapping, index, entry);
spin_unlock_irq(&mapping->tree_lock);
return ret;
}
/*
* Delete exceptional DAX entry at @index from @mapping. Wait for radix tree
* entry to get unlocked before deleting it.
*/
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
{
void *entry;
int ret = __dax_invalidate_mapping_entry(mapping, index, true);

spin_lock_irq(&mapping->tree_lock);
entry = get_unlocked_mapping_entry(mapping, index, NULL);
/*
* This gets called from truncate / punch_hole path. As such, the caller
* must hold locks protecting against concurrent modifications of the

@@ -468,16 +489,46 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
* caller has seen exceptional entry for this index, we better find it
* at that index as well...
*/
if (WARN_ON_ONCE(!entry || !radix_tree_exceptional_entry(entry))) {
spin_unlock_irq(&mapping->tree_lock);
return 0;
}
radix_tree_delete(&mapping->page_tree, index);
mapping->nrexceptional--;
spin_unlock_irq(&mapping->tree_lock);
dax_wake_mapping_entry_waiter(mapping, index, entry, true);
WARN_ON_ONCE(!ret);
return ret;
}

return 1;
/*
* Invalidate exceptional DAX entry if easily possible. This handles DAX
* entries for invalidate_inode_pages() so we evict the entry only if we can
* do so without blocking.
*/
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index)
{
int ret = 0;
void *entry, **slot;
struct radix_tree_root *page_tree = &mapping->page_tree;

spin_lock_irq(&mapping->tree_lock);
entry = __radix_tree_lookup(page_tree, index, NULL, &slot);
if (!entry || !radix_tree_exceptional_entry(entry) ||
slot_locked(mapping, slot))
goto out;
if (radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_DIRTY) ||
radix_tree_tag_get(page_tree, index, PAGECACHE_TAG_TOWRITE))
goto out;
radix_tree_delete(page_tree, index);
mapping->nrexceptional--;
ret = 1;
out:
spin_unlock_irq(&mapping->tree_lock);
if (ret)
dax_wake_mapping_entry_waiter(mapping, index, entry, true);
return ret;
}

/*
* Invalidate exceptional DAX entry if it is clean.
*/
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index)
{
return __dax_invalidate_mapping_entry(mapping, index, false);
}

/*

@@ -488,15 +539,16 @@ int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index)
* otherwise it will simply fall out of the page cache under memory
* pressure without ever having been dirtied.
*/
static int dax_load_hole(struct address_space *mapping, void *entry,
static int dax_load_hole(struct address_space *mapping, void **entry,
struct vm_fault *vmf)
{
struct page *page;
int ret;

/* Hole page already exists? Return it... */
if (!radix_tree_exceptional_entry(entry)) {
vmf->page = entry;
return VM_FAULT_LOCKED;
if (!radix_tree_exceptional_entry(*entry)) {
page = *entry;
goto out;
}

/* This will replace locked radix tree entry with a hole page */

@@ -504,8 +556,17 @@ static int dax_load_hole(struct address_space *mapping, void *entry,
vmf->gfp_mask | __GFP_ZERO);
if (!page)
return VM_FAULT_OOM;
out:
vmf->page = page;
return VM_FAULT_LOCKED;
ret = finish_fault(vmf);
vmf->page = NULL;
*entry = page;
if (!ret) {
/* Grab reference for PTE that is now referencing the page */
get_page(page);
return VM_FAULT_NOPAGE;
}
return ret;
}

static int copy_user_dax(struct block_device *bdev, sector_t sector, size_t size,

@@ -934,6 +995,17 @@ dax_iomap_actor(struct inode *inode, loff_t pos, loff_t length, void *data,
if (WARN_ON_ONCE(iomap->type != IOMAP_MAPPED))
return -EIO;

/*
* Write can allocate block for an area which has a hole page mapped
* into page tables. We have to tear down these mappings so that data
* written by write(2) is visible in mmap.
*/
if ((iomap->flags & IOMAP_F_NEW) && inode->i_mapping->nrpages) {
invalidate_inode_pages2_range(inode->i_mapping,
pos >> PAGE_SHIFT,
(end - 1) >> PAGE_SHIFT);
}

while (pos < end) {
unsigned offset = pos & (PAGE_SIZE - 1);
struct blk_dax_ctl dax = { 0 };

@@ -992,23 +1064,6 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
if (iov_iter_rw(iter) == WRITE)
flags |= IOMAP_WRITE;

/*
* Yes, even DAX files can have page cache attached to them: A zeroed
* page is inserted into the pagecache when we have to serve a write
* fault on a hole. It should never be dirtied and can simply be
* dropped from the pagecache once we get real data for the page.
*
* XXX: This is racy against mmap, and there's nothing we can do about
* it. We'll eventually need to shift this down even further so that
* we can check if we allocated blocks over a hole first.
*/
if (mapping->nrpages) {
ret = invalidate_inode_pages2_range(mapping,
pos >> PAGE_SHIFT,
(pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT);
WARN_ON_ONCE(ret);
}

while (iov_iter_count(iter)) {
ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
iter, dax_iomap_actor);

@@ -1023,6 +1078,15 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
}
EXPORT_SYMBOL_GPL(dax_iomap_rw);

static int dax_fault_return(int error)
{
if (error == 0)
return VM_FAULT_NOPAGE;
if (error == -ENOMEM)
return VM_FAULT_OOM;
return VM_FAULT_SIGBUS;
}

/**
* dax_iomap_fault - handle a page fault on a DAX file
* @vma: The virtual memory area where the fault occurred

@@ -1055,12 +1119,6 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
if (pos >= i_size_read(inode))
return VM_FAULT_SIGBUS;

entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
if (IS_ERR(entry)) {
error = PTR_ERR(entry);
goto out;
}

if ((vmf->flags & FAULT_FLAG_WRITE) && !vmf->cow_page)
flags |= IOMAP_WRITE;

@@ -1071,9 +1129,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
*/
error = ops->iomap_begin(inode, pos, PAGE_SIZE, flags, &iomap);
if (error)
goto unlock_entry;
return dax_fault_return(error);
if (WARN_ON_ONCE(iomap.offset + iomap.length < pos + PAGE_SIZE)) {
error = -EIO; /* fs corruption? */
vmf_ret = dax_fault_return(-EIO); /* fs corruption? */
goto finish_iomap;
}

entry = grab_mapping_entry(mapping, vmf->pgoff, 0);
if (IS_ERR(entry)) {
vmf_ret = dax_fault_return(PTR_ERR(entry));
goto finish_iomap;
}

@@ -1096,13 +1160,13 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
}

if (error)
goto finish_iomap;
goto error_unlock_entry;

__SetPageUptodate(vmf->cow_page);
vmf_ret = finish_fault(vmf);
if (!vmf_ret)
vmf_ret = VM_FAULT_DONE_COW;
goto finish_iomap;
goto unlock_entry;
}

switch (iomap.type) {

@@ -1114,12 +1178,15 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
}
error = dax_insert_mapping(mapping, iomap.bdev, sector,
PAGE_SIZE, &entry, vma, vmf);
/* -EBUSY is fine, somebody else faulted on the same PTE */
if (error == -EBUSY)
error = 0;
break;
case IOMAP_UNWRITTEN:
case IOMAP_HOLE:
if (!(vmf->flags & FAULT_FLAG_WRITE)) {
vmf_ret = dax_load_hole(mapping, entry, vmf);
break;
vmf_ret = dax_load_hole(mapping, &entry, vmf);
goto unlock_entry;
}
/*FALLTHRU*/
default:

@@ -1128,31 +1195,25 @@ int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
break;
}

error_unlock_entry:
vmf_ret = dax_fault_return(error) | major;
unlock_entry:
put_locked_mapping_entry(mapping, vmf->pgoff, entry);
finish_iomap:
if (ops->iomap_end) {
if (error || (vmf_ret & VM_FAULT_ERROR)) {
/* keep previous error */
ops->iomap_end(inode, pos, PAGE_SIZE, 0, flags,
&iomap);
} else {
error = ops->iomap_end(inode, pos, PAGE_SIZE,
PAGE_SIZE, flags, &iomap);
}
int copied = PAGE_SIZE;

if (vmf_ret & VM_FAULT_ERROR)
copied = 0;
/*
* The fault is done by now and there's no way back (other
* thread may be already happily using PTE we have installed).
* Just ignore error from ->iomap_end since we cannot do much
* with it.
*/
ops->iomap_end(inode, pos, PAGE_SIZE, copied, flags, &iomap);
}
unlock_entry:
if (vmf_ret != VM_FAULT_LOCKED || error)
put_locked_mapping_entry(mapping, vmf->pgoff, entry);
out:
if (error == -ENOMEM)
return VM_FAULT_OOM | major;
/* -EBUSY is fine, somebody else faulted on the same PTE */
if (error < 0 && error != -EBUSY)
return VM_FAULT_SIGBUS | major;
if (vmf_ret) {
WARN_ON_ONCE(error); /* -EBUSY from ops->iomap_end? */
return vmf_ret;
}
return VM_FAULT_NOPAGE | major;
return vmf_ret;
}
EXPORT_SYMBOL_GPL(dax_iomap_fault);

@@ -1276,16 +1337,6 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
if ((pgoff | PG_PMD_COLOUR) > max_pgoff)
goto fallback;

/*
* grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
* PMD or a HZP entry. If it can't (because a 4k page is already in
* the tree, for instance), it will return -EEXIST and we just fall
* back to 4k entries.
*/
entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
if (IS_ERR(entry))
goto fallback;

/*
* Note that we don't use iomap_apply here. We aren't doing I/O, only
* setting up a mapping, so really we're using iomap_begin() as a way

@@ -1294,10 +1345,21 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
pos = (loff_t)pgoff << PAGE_SHIFT;
error = ops->iomap_begin(inode, pos, PMD_SIZE, iomap_flags, &iomap);
if (error)
goto unlock_entry;
goto fallback;

if (iomap.offset + iomap.length < pos + PMD_SIZE)
goto finish_iomap;

/*
* grab_mapping_entry() will make sure we get a 2M empty entry, a DAX
* PMD or a HZP entry. If it can't (because a 4k page is already in
* the tree, for instance), it will return -EEXIST and we just fall
* back to 4k entries.
*/
entry = grab_mapping_entry(mapping, pgoff, RADIX_DAX_PMD);
if (IS_ERR(entry))
goto finish_iomap;

vmf.pgoff = pgoff;
vmf.flags = flags;
vmf.gfp_mask = mapping_gfp_mask(mapping) | __GFP_IO;

@@ -1310,7 +1372,7 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
case IOMAP_UNWRITTEN:
case IOMAP_HOLE:
if (WARN_ON_ONCE(write))
goto finish_iomap;
goto unlock_entry;
result = dax_pmd_load_hole(vma, pmd, &vmf, address, &iomap,
&entry);
break;

@@ -1319,20 +1381,23 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
break;
}

finish_iomap:
if (ops->iomap_end) {
if (result == VM_FAULT_FALLBACK) {
ops->iomap_end(inode, pos, PMD_SIZE, 0, iomap_flags,
&iomap);
} else {
error = ops->iomap_end(inode, pos, PMD_SIZE, PMD_SIZE,
iomap_flags, &iomap);
if (error)
result = VM_FAULT_FALLBACK;
}
}
unlock_entry:
put_locked_mapping_entry(mapping, pgoff, entry);
finish_iomap:
if (ops->iomap_end) {
int copied = PMD_SIZE;

if (result == VM_FAULT_FALLBACK)
copied = 0;
/*
* The fault is done by now and there's no way back (other
* thread may be already happily using PMD we have installed).
* Just ignore error from ->iomap_end since we cannot do much
* with it.
*/
ops->iomap_end(inode, pos, PMD_SIZE, copied, iomap_flags,
&iomap);
}
fallback:
if (result == VM_FAULT_FALLBACK) {
split_huge_pmd(vma, pmd, address);

|
|||
mutex_unlock(&ei->truncate_mutex);
|
||||
goto cleanup;
|
||||
}
|
||||
} else {
|
||||
*new = true;
|
||||
}
|
||||
*new = true;
|
||||
|
||||
ext2_splice_branch(inode, iblock, partial, indirect_blks, count);
|
||||
mutex_unlock(&ei->truncate_mutex);
|
||||
|
|
|
@ -258,7 +258,6 @@ out:
|
|||
static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
{
|
||||
int result;
|
||||
handle_t *handle = NULL;
|
||||
struct inode *inode = file_inode(vma->vm_file);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
bool write = vmf->flags & FAULT_FLAG_WRITE;
|
||||
|
@ -266,24 +265,12 @@ static int ext4_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
|||
if (write) {
|
||||
sb_start_pagefault(sb);
|
||||
file_update_time(vma->vm_file);
|
||||
down_read(&EXT4_I(inode)->i_mmap_sem);
|
||||
handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
|
||||
EXT4_DATA_TRANS_BLOCKS(sb));
|
||||
} else
|
||||
down_read(&EXT4_I(inode)->i_mmap_sem);
|
||||
|
||||
if (IS_ERR(handle))
|
||||
result = VM_FAULT_SIGBUS;
|
||||
else
|
||||
result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
|
||||
|
||||
if (write) {
|
||||
if (!IS_ERR(handle))
|
||||
ext4_journal_stop(handle);
|
||||
up_read(&EXT4_I(inode)->i_mmap_sem);
|
||||
}
|
||||
down_read(&EXT4_I(inode)->i_mmap_sem);
|
||||
result = dax_iomap_fault(vma, vmf, &ext4_iomap_ops);
|
||||
up_read(&EXT4_I(inode)->i_mmap_sem);
|
||||
if (write)
|
||||
sb_end_pagefault(sb);
|
||||
} else
|
||||
up_read(&EXT4_I(inode)->i_mmap_sem);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -292,7 +279,6 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
|
|||
pmd_t *pmd, unsigned int flags)
|
||||
{
|
||||
int result;
|
||||
handle_t *handle = NULL;
|
||||
struct inode *inode = file_inode(vma->vm_file);
|
||||
struct super_block *sb = inode->i_sb;
|
||||
bool write = flags & FAULT_FLAG_WRITE;
|
||||
|
@ -300,27 +286,13 @@ static int ext4_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
|
|||
if (write) {
|
||||
sb_start_pagefault(sb);
|
||||
file_update_time(vma->vm_file);
|
||||
down_read(&EXT4_I(inode)->i_mmap_sem);
|
||||
handle = ext4_journal_start_sb(sb, EXT4_HT_WRITE_PAGE,
|
||||
ext4_chunk_trans_blocks(inode,
|
||||
PMD_SIZE / PAGE_SIZE));
|
||||
} else
|
||||
down_read(&EXT4_I(inode)->i_mmap_sem);
|
||||
|
||||
if (IS_ERR(handle))
|
||||
result = VM_FAULT_SIGBUS;
|
||||
else {
|
||||
result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
|
||||
&ext4_iomap_ops);
|
||||
}
|
||||
|
||||
if (write) {
|
||||
if (!IS_ERR(handle))
|
||||
ext4_journal_stop(handle);
|
||||
up_read(&EXT4_I(inode)->i_mmap_sem);
|
||||
down_read(&EXT4_I(inode)->i_mmap_sem);
|
||||
result = dax_iomap_pmd_fault(vma, addr, pmd, flags,
|
||||
&ext4_iomap_ops);
|
||||
up_read(&EXT4_I(inode)->i_mmap_sem);
|
||||
if (write)
|
||||
sb_end_pagefault(sb);
|
||||
} else
|
||||
up_read(&EXT4_I(inode)->i_mmap_sem);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
|
|
@@ -256,6 +256,9 @@ xfs_ag_resv_init(
goto out;
}

ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <=
pag->pagf_freeblks + pag->pagf_flcount);
out:
return error;
}

@@ -409,13 +409,14 @@ xfs_refcountbt_calc_size(
*/
xfs_extlen_t
xfs_refcountbt_max_size(
struct xfs_mount *mp)
struct xfs_mount *mp,
xfs_agblock_t agblocks)
{
/* Bail out if we're uninitialized, which can happen in mkfs. */
if (mp->m_refc_mxr[0] == 0)
return 0;

return xfs_refcountbt_calc_size(mp, mp->m_sb.sb_agblocks);
return xfs_refcountbt_calc_size(mp, agblocks);
}

/*

@@ -430,22 +431,24 @@ xfs_refcountbt_calc_reserves(
{
struct xfs_buf *agbp;
struct xfs_agf *agf;
xfs_agblock_t agblocks;
xfs_extlen_t tree_len;
int error;

if (!xfs_sb_version_hasreflink(&mp->m_sb))
return 0;

*ask += xfs_refcountbt_max_size(mp);

error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
if (error)
return error;

agf = XFS_BUF_TO_AGF(agbp);
agblocks = be32_to_cpu(agf->agf_length);
tree_len = be32_to_cpu(agf->agf_refcount_blocks);
xfs_buf_relse(agbp);

*ask += xfs_refcountbt_max_size(mp, agblocks);
*used += tree_len;

return error;

@@ -66,7 +66,8 @@ extern void xfs_refcountbt_compute_maxlevels(struct xfs_mount *mp);

extern xfs_extlen_t xfs_refcountbt_calc_size(struct xfs_mount *mp,
unsigned long long len);
extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp);
extern xfs_extlen_t xfs_refcountbt_max_size(struct xfs_mount *mp,
xfs_agblock_t agblocks);

extern int xfs_refcountbt_calc_reserves(struct xfs_mount *mp,
xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);

@@ -550,13 +550,14 @@ xfs_rmapbt_calc_size(
*/
xfs_extlen_t
xfs_rmapbt_max_size(
struct xfs_mount *mp)
struct xfs_mount *mp,
xfs_agblock_t agblocks)
{
/* Bail out if we're uninitialized, which can happen in mkfs. */
if (mp->m_rmap_mxr[0] == 0)
return 0;

return xfs_rmapbt_calc_size(mp, mp->m_sb.sb_agblocks);
return xfs_rmapbt_calc_size(mp, agblocks);
}

/*

@@ -571,25 +572,24 @@ xfs_rmapbt_calc_reserves(
{
struct xfs_buf *agbp;
struct xfs_agf *agf;
xfs_extlen_t pool_len;
xfs_agblock_t agblocks;
xfs_extlen_t tree_len;
int error;

if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
return 0;

/* Reserve 1% of the AG or enough for 1 block per record. */
pool_len = max(mp->m_sb.sb_agblocks / 100, xfs_rmapbt_max_size(mp));
*ask += pool_len;

error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
if (error)
return error;

agf = XFS_BUF_TO_AGF(agbp);
agblocks = be32_to_cpu(agf->agf_length);
tree_len = be32_to_cpu(agf->agf_rmap_blocks);
xfs_buf_relse(agbp);

/* Reserve 1% of the AG or enough for 1 block per record. */
*ask += max(agblocks / 100, xfs_rmapbt_max_size(mp, agblocks));
*used += tree_len;

return error;

@@ -60,7 +60,8 @@ extern void xfs_rmapbt_compute_maxlevels(struct xfs_mount *mp);

extern xfs_extlen_t xfs_rmapbt_calc_size(struct xfs_mount *mp,
unsigned long long len);
extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp);
extern xfs_extlen_t xfs_rmapbt_max_size(struct xfs_mount *mp,
xfs_agblock_t agblocks);

extern int xfs_rmapbt_calc_reserves(struct xfs_mount *mp,
xfs_agnumber_t agno, xfs_extlen_t *ask, xfs_extlen_t *used);

@@ -631,6 +631,20 @@ xfs_growfs_data_private(
xfs_set_low_space_thresholds(mp);
mp->m_alloc_set_aside = xfs_alloc_set_aside(mp);

/*
* If we expanded the last AG, free the per-AG reservation
* so we can reinitialize it with the new size.
*/
if (new) {
struct xfs_perag *pag;

pag = xfs_perag_get(mp, agno);
error = xfs_ag_resv_free(pag);
xfs_perag_put(pag);
if (error)
goto out;
}

/* Reserve AG metadata blocks. */
error = xfs_fs_reserve_ag_blocks(mp);
if (error && error != -ENOSPC)

@@ -1597,7 +1597,8 @@ xfs_inode_free_cowblocks(
* If the mapping is dirty or under writeback we cannot touch the
* CoW fork. Leave it alone if we're in the midst of a directio.
*/
if (mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
if ((VFS_I(ip)->i_state & I_DIRTY_PAGES) ||
mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_DIRTY) ||
mapping_tagged(VFS_I(ip)->i_mapping, PAGECACHE_TAG_WRITEBACK) ||
atomic_read(&VFS_I(ip)->i_dio_count))
return 0;

@@ -526,13 +526,14 @@ xfs_cui_recover(
xfs_refcount_finish_one_cleanup(tp, rcur, error);
error = xfs_defer_finish(&tp, &dfops, NULL);
if (error)
goto abort_error;
goto abort_defer;
set_bit(XFS_CUI_RECOVERED, &cuip->cui_flags);
error = xfs_trans_commit(tp);
return error;

abort_error:
xfs_refcount_finish_one_cleanup(tp, rcur, error);
abort_defer:
xfs_defer_cancel(&dfops);
xfs_trans_cancel(tp);
return error;

@@ -396,7 +396,7 @@ max_retries_show(
int retries;
struct xfs_error_cfg *cfg = to_error_cfg(kobject);

if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
retries = -1;
else
retries = cfg->max_retries;

@@ -422,7 +422,7 @@ max_retries_store(
return -EINVAL;

if (val == -1)
cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
cfg->max_retries = XFS_ERR_RETRY_FOREVER;
else
cfg->max_retries = val;
return count;

@@ -41,6 +41,9 @@ ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
int dax_iomap_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
struct iomap_ops *ops);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
void dax_wake_mapping_entry_waiter(struct address_space *mapping,
pgoff_t index, void *entry, bool wake_all);

@@ -146,15 +146,6 @@ enum {
DISK_EVENT_EJECT_REQUEST = 1 << 1, /* eject requested */
};

#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))

struct blk_scsi_cmd_filter {
unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
struct kobject kobj;
};

struct disk_part_tbl {
struct rcu_head rcu_head;
int len;

@@ -1384,6 +1384,8 @@ int set_phv_bit(struct mlx4_dev *dev, u8 port, int new_val);
int get_phv_bit(struct mlx4_dev *dev, u8 port, int *phv);
int mlx4_get_is_vlan_offload_disabled(struct mlx4_dev *dev, u8 port,
bool *vlan_offload_disabled);
void mlx4_handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
struct _rule_hw *eth_header);
int mlx4_find_cached_mac(struct mlx4_dev *dev, u8 port, u64 mac, int *idx);
int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);

@@ -1073,11 +1073,6 @@ enum {
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};

enum {
MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
MLX5_PCIE_TIMERS_AND_STATES_COUNTERS_GROUP = 0x2,
};

static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)

@@ -125,7 +125,6 @@ enum {
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MLCR = 0x902b,
MLX5_REG_MPCNT = 0x9051,
};

enum mlx5_dcbx_oper_mode {

@@ -1758,80 +1758,6 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
u8 reserved_at_4c0[0x300];
};

struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
u8 life_time_counter_high[0x20];

u8 life_time_counter_low[0x20];

u8 rx_errors[0x20];

u8 tx_errors[0x20];

u8 l0_to_recovery_eieos[0x20];

u8 l0_to_recovery_ts[0x20];

u8 l0_to_recovery_framing[0x20];

u8 l0_to_recovery_retrain[0x20];

u8 crc_error_dllp[0x20];

u8 crc_error_tlp[0x20];

u8 reserved_at_140[0x680];
};

struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits {
u8 life_time_counter_high[0x20];

u8 life_time_counter_low[0x20];

u8 time_to_boot_image_start[0x20];

u8 time_to_link_image[0x20];

u8 calibration_time[0x20];

u8 time_to_first_perst[0x20];

u8 time_to_detect_state[0x20];

u8 time_to_l0[0x20];

u8 time_to_crs_en[0x20];

u8 time_to_plastic_image_start[0x20];

u8 time_to_iron_image_start[0x20];

u8 perst_handler[0x20];

u8 times_in_l1[0x20];

u8 times_in_l23[0x20];

u8 dl_down[0x20];

u8 config_cycle1usec[0x20];

u8 config_cycle2to7usec[0x20];

u8 config_cycle_8to15usec[0x20];

u8 config_cycle_16_to_63usec[0x20];

u8 config_cycle_64usec[0x20];

u8 correctable_err_msg_sent[0x20];

u8 non_fatal_err_msg_sent[0x20];

u8 fatal_err_msg_sent[0x20];

u8 reserved_at_2e0[0x4e0];
};

struct mlx5_ifc_cmd_inter_comp_event_bits {
u8 command_completion_vector[0x20];

@@ -2997,12 +2923,6 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
u8 reserved_at_0[0x7c0];
};

union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
struct mlx5_ifc_pcie_tas_cntrs_grp_data_layout_bits pcie_tas_cntrs_grp_data_layout;
u8 reserved_at_0[0x7c0];
};

union mlx5_ifc_event_auto_bits {
struct mlx5_ifc_comp_event_bits comp_event;
struct mlx5_ifc_dct_events_bits dct_events;

@@ -7325,18 +7245,6 @@ struct mlx5_ifc_ppcnt_reg_bits {
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};

struct mlx5_ifc_mpcnt_reg_bits {
u8 reserved_at_0[0x8];
u8 pcie_index[0x8];
u8 reserved_at_10[0xa];
u8 grp[0x6];

u8 clr[0x1];
u8 reserved_at_21[0x1f];

union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
};

struct mlx5_ifc_ppad_reg_bits {
u8 reserved_at_0[0x3];
u8 single_mac[0x1];

@@ -7942,7 +7850,6 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
struct mlx5_ifc_ppad_reg_bits ppad_reg;
struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
struct mlx5_ifc_pplm_reg_bits pplm_reg;
struct mlx5_ifc_pplr_reg_bits pplr_reg;
struct mlx5_ifc_ppsc_reg_bits ppsc_reg;

@@ -73,13 +73,13 @@
*/
enum pageflags {
PG_locked, /* Page is locked. Don't touch. */
PG_waiters, /* Page has waiters, check its waitqueue */
PG_error,
PG_referenced,
PG_uptodate,
PG_dirty,
PG_lru,
PG_active,
PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_slab,
PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
PG_arch_1,

mm/filemap.c

@@ -912,6 +912,29 @@ void add_page_wait_queue(struct page *page, wait_queue_t *waiter)
}
EXPORT_SYMBOL_GPL(add_page_wait_queue);

#ifndef clear_bit_unlock_is_negative_byte

/*
* PG_waiters is the high bit in the same byte as PG_lock.
*
* On x86 (and on many other architectures), we can clear PG_lock and
* test the sign bit at the same time. But if the architecture does
* not support that special operation, we just do this all by hand
* instead.
*
* The read of PG_waiters has to be after (or concurrently with) PG_locked
* being cleared, but a memory barrier should be unneccssary since it is
* in the same byte as PG_locked.
*/
static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem)
{
clear_bit_unlock(nr, mem);
/* smp_mb__after_atomic(); */
return test_bit(PG_waiters, mem);
}

#endif

/**
* unlock_page - unlock a locked page
* @page: the page

@@ -921,16 +944,19 @@ EXPORT_SYMBOL_GPL(add_page_wait_queue);
* mechanism between PageLocked pages and PageWriteback pages is shared.
* But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
*
* The mb is necessary to enforce ordering between the clear_bit and the read
* of the waitqueue (to avoid SMP races with a parallel wait_on_page_locked()).
* Note that this depends on PG_waiters being the sign bit in the byte
* that contains PG_locked - thus the BUILD_BUG_ON(). That allows us to
* clear the PG_locked bit and test PG_waiters at the same time fairly
* portably (architectures that do LL/SC can test any bit, while x86 can
* test the sign bit).
*/
void unlock_page(struct page *page)
{
BUILD_BUG_ON(PG_waiters != 7);
page = compound_head(page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
clear_bit_unlock(PG_locked, &page->flags);
smp_mb__after_atomic();
wake_up_page(page, PG_locked);
if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags))
wake_up_page_bit(page, PG_locked);
}
EXPORT_SYMBOL(unlock_page);

@@ -24,20 +24,12 @@
#include <linux/rmap.h>
#include "internal.h"

static void clear_exceptional_entry(struct address_space *mapping,
pgoff_t index, void *entry)
static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
void *entry)
{
struct radix_tree_node *node;
void **slot;

/* Handled by shmem itself */
if (shmem_mapping(mapping))
return;

if (dax_mapping(mapping)) {
dax_delete_mapping_entry(mapping, index);
return;
}
spin_lock_irq(&mapping->tree_lock);
/*
* Regular page slots are stabilized by the page lock even

@@ -55,6 +47,56 @@ unlock:
spin_unlock_irq(&mapping->tree_lock);
}

/*
* Unconditionally remove exceptional entry. Usually called from truncate path.
*/
static void truncate_exceptional_entry(struct address_space *mapping,
pgoff_t index, void *entry)
{
/* Handled by shmem itself */
if (shmem_mapping(mapping))
return;

if (dax_mapping(mapping)) {
dax_delete_mapping_entry(mapping, index);
return;
}
clear_shadow_entry(mapping, index, entry);
}

/*
* Invalidate exceptional entry if easily possible. This handles exceptional
* entries for invalidate_inode_pages() so for DAX it evicts only unlocked and
* clean entries.
*/
static int invalidate_exceptional_entry(struct address_space *mapping,
pgoff_t index, void *entry)
{
/* Handled by shmem itself */
if (shmem_mapping(mapping))
return 1;
if (dax_mapping(mapping))
return dax_invalidate_mapping_entry(mapping, index);
clear_shadow_entry(mapping, index, entry);
return 1;
}

/*
* Invalidate exceptional entry if clean. This handles exceptional entries for
* invalidate_inode_pages2() so for DAX it evicts only clean entries.
*/
static int invalidate_exceptional_entry2(struct address_space *mapping,
pgoff_t index, void *entry)
{
/* Handled by shmem itself */
if (shmem_mapping(mapping))
return 1;
if (dax_mapping(mapping))
return dax_invalidate_mapping_entry_sync(mapping, index);
clear_shadow_entry(mapping, index, entry);
return 1;
}

/**
* do_invalidatepage - invalidate part or all of a page
* @page: the page which is affected

@@ -262,7 +304,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
break;

if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page);
truncate_exceptional_entry(mapping, index,
page);
continue;
}

@@ -351,7 +394,8 @@ void truncate_inode_pages_range(struct address_space *mapping,
}

if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page);
truncate_exceptional_entry(mapping, index,
page);
continue;
}

@@ -470,7 +514,8 @@ unsigned long invalidate_mapping_pages(struct address_space *mapping,
break;

if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page);
invalidate_exceptional_entry(mapping, index,
page);
continue;
}

@@ -592,7 +637,9 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
break;

if (radix_tree_exceptional_entry(page)) {
clear_exceptional_entry(mapping, index, page);
if (!invalidate_exceptional_entry2(mapping,
index, page))
ret = -EBUSY;
continue;
}

@@ -1059,7 +1059,9 @@ static void __exit lane_module_cleanup(void)
{
int i;

#ifdef CONFIG_PROC_FS
remove_proc_entry("lec", atm_proc_root);
#endif

deregister_atm_ioctl(&lane_ioctl_ops);

@@ -75,6 +75,7 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
struct nlattr *nla;
struct sk_buff *skb;
unsigned long flags;
void *msg_header;

al = sizeof(struct net_dm_alert_msg);
al += dm_hit_limit * sizeof(struct net_dm_drop_point);

@@ -82,21 +83,41 @@ static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)

skb = genlmsg_new(al, GFP_KERNEL);

if (skb) {
genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
0, NET_DM_CMD_ALERT);
nla = nla_reserve(skb, NLA_UNSPEC,
sizeof(struct net_dm_alert_msg));
msg = nla_data(nla);
memset(msg, 0, al);
} else {
mod_timer(&data->send_timer, jiffies + HZ / 10);
}
if (!skb)
goto err;

msg_header = genlmsg_put(skb, 0, 0, &net_drop_monitor_family,
0, NET_DM_CMD_ALERT);
if (!msg_header) {
nlmsg_free(skb);
skb = NULL;
goto err;
}
nla = nla_reserve(skb, NLA_UNSPEC,
sizeof(struct net_dm_alert_msg));
if (!nla) {
nlmsg_free(skb);
skb = NULL;
goto err;
}
msg = nla_data(nla);
memset(msg, 0, al);
goto out;

err:
mod_timer(&data->send_timer, jiffies + HZ / 10);
out:
spin_lock_irqsave(&data->lock, flags);
swap(data->skb, skb);
spin_unlock_irqrestore(&data->lock, flags);

if (skb) {
struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;
struct genlmsghdr *gnlh = (struct genlmsghdr *)nlmsg_data(nlh);

genlmsg_end(skb, genlmsg_data(gnlh));
}

return skb;
}

@@ -468,8 +468,9 @@ ip_proto_again:
if (hdr->flags & GRE_ACK)
offset += sizeof(((struct pptp_gre_header *)0)->ack);

ppp_hdr = skb_header_pointer(skb, nhoff + offset,
sizeof(_ppp_hdr), _ppp_hdr);
ppp_hdr = __skb_header_pointer(skb, nhoff + offset,
sizeof(_ppp_hdr),
data, hlen, _ppp_hdr);
if (!ppp_hdr)
goto out_bad;

@@ -3898,6 +3898,9 @@ static int rtnl_stats_get(struct sk_buff *skb, struct nlmsghdr *nlh)
u32 filter_mask;
int err;

if (nlmsg_len(nlh) < sizeof(*ifsm))
return -EINVAL;

ifsm = nlmsg_data(nlh);
if (ifsm->ifindex > 0)
dev = __dev_get_by_index(net, ifsm->ifindex);

@@ -3947,6 +3950,9 @@ static int rtnl_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)

cb->seq = net->dev_base_seq;

if (nlmsg_len(cb->nlh) < sizeof(*ifsm))
return -EINVAL;

ifsm = nlmsg_data(cb->nlh);
filter_mask = ifsm->filter_mask;
if (!filter_mask)

@@ -85,7 +85,7 @@ struct fib_table *fib_new_table(struct net *net, u32 id)
if (tb)
return tb;

if (id == RT_TABLE_LOCAL)
if (id == RT_TABLE_LOCAL && !net->ipv4.fib_has_custom_rules)
alias = fib_new_table(net, RT_TABLE_MAIN);

tb = fib_trie_table(id, alias);

@@ -219,9 +219,14 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
static void igmp_gq_start_timer(struct in_device *in_dev)
{
int tv = prandom_u32() % in_dev->mr_maxdelay;
unsigned long exp = jiffies + tv + 2;

if (in_dev->mr_gq_running &&
time_after_eq(exp, (in_dev->mr_gq_timer).expires))
return;

in_dev->mr_gq_running = 1;
if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
if (!mod_timer(&in_dev->mr_gq_timer, exp))
in_dev_hold(in_dev);
}

@@ -1230,8 +1230,14 @@ void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb)
* which has interface index (iif) as the first member of the
* underlying inet{6}_skb_parm struct. This code then overlays
* PKTINFO_SKB_CB and in_pktinfo also has iif as the first
* element so the iif is picked up from the prior IPCB
* element so the iif is picked up from the prior IPCB. If iif
* is the loopback interface, then return the sending interface
* (e.g., process binds socket to eth0 for Tx which is
* redirected to loopback in the rtable/dst).
*/
if (pktinfo->ipi_ifindex == LOOPBACK_IFINDEX)
pktinfo->ipi_ifindex = inet_iif(skb);

pktinfo->ipi_spec_dst.s_addr = fib_compute_spec_dst(skb);
} else {
pktinfo->ipi_ifindex = 0;

@@ -1914,7 +1914,8 @@ local_input:
}
}

rth = rt_dst_alloc(net->loopback_dev, flags | RTCF_LOCAL, res.type,
rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
flags | RTCF_LOCAL, res.type,
IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
if (!rth)
goto e_nobufs;

@@ -1373,7 +1373,7 @@ emsgsize:
*/

cork->length += length;
if (((length > mtu) ||
if ((((length + fragheaderlen) > mtu) ||
(skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len &&

@@ -47,7 +47,8 @@ static inline struct l2tp_ip_sock *l2tp_ip_sk(const struct sock *sk)
return (struct l2tp_ip_sock *)sk;
}

static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
static struct sock *__l2tp_ip_bind_lookup(const struct net *net, __be32 laddr,
__be32 raddr, int dif, u32 tunnel_id)
{
struct sock *sk;

@@ -61,6 +62,7 @@ static struct sock *__l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif
if ((l2tp->conn_id == tunnel_id) &&
net_eq(sock_net(sk), net) &&
!(inet->inet_rcv_saddr && inet->inet_rcv_saddr != laddr) &&
(!inet->inet_daddr || !raddr || inet->inet_daddr == raddr) &&
(!sk->sk_bound_dev_if || !dif ||
sk->sk_bound_dev_if == dif))
goto found;

@@ -71,15 +73,6 @@ found:
return sk;
}

static inline struct sock *l2tp_ip_bind_lookup(struct net *net, __be32 laddr, int dif, u32 tunnel_id)
{
struct sock *sk = __l2tp_ip_bind_lookup(net, laddr, dif, tunnel_id);
if (sk)
sock_hold(sk);

return sk;
}

/* When processing receive frames, there are two cases to
* consider. Data frames consist of a non-zero session-id and an
* optional cookie. Control frames consist of a regular L2TP header

@@ -183,8 +176,8 @@ pass_up:
struct iphdr *iph = (struct iphdr *) skb_network_header(skb);

read_lock_bh(&l2tp_ip_lock);
sk = __l2tp_ip_bind_lookup(net, iph->daddr, inet_iif(skb),
tunnel_id);
sk = __l2tp_ip_bind_lookup(net, iph->daddr, iph->saddr,
inet_iif(skb), tunnel_id);
if (!sk) {
read_unlock_bh(&l2tp_ip_lock);
goto discard;

@@ -280,7 +273,7 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->inet_saddr = 0; /* Use device */

write_lock_bh(&l2tp_ip_lock);
if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr,
if (__l2tp_ip_bind_lookup(net, addr->l2tp_addr.s_addr, 0,
sk->sk_bound_dev_if, addr->l2tp_conn_id)) {
write_unlock_bh(&l2tp_ip_lock);
ret = -EADDRINUSE;

@@ -59,12 +59,14 @@ static inline struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk)
 
 static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
 					   struct in6_addr *laddr,
+					   const struct in6_addr *raddr,
 					   int dif, u32 tunnel_id)
 {
 	struct sock *sk;
 
 	sk_for_each_bound(sk, &l2tp_ip6_bind_table) {
-		const struct in6_addr *addr = inet6_rcv_saddr(sk);
+		const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk);
+		const struct in6_addr *sk_raddr = &sk->sk_v6_daddr;
 		struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk);
 
 		if (l2tp == NULL)
@@ -72,7 +74,8 @@ static struct sock *__l2tp_ip6_bind_lookup(struct net *net,
 
 		if ((l2tp->conn_id == tunnel_id) &&
 		    net_eq(sock_net(sk), net) &&
-		    (!addr || ipv6_addr_equal(addr, laddr)) &&
+		    (!sk_laddr || ipv6_addr_any(sk_laddr) || ipv6_addr_equal(sk_laddr, laddr)) &&
+		    (!raddr || ipv6_addr_any(sk_raddr) || ipv6_addr_equal(sk_raddr, raddr)) &&
 		    (!sk->sk_bound_dev_if || !dif ||
 		     sk->sk_bound_dev_if == dif))
 			goto found;
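The IPv6 test is wordier than the IPv4 one because an all-zero `in6_addr` (::) must be treated as "unset" explicitly, hence the `ipv6_addr_any()` checks on both the bound local address and the stored peer address. A userspace sketch of the wildcard-or-equal rule using libc's `in6_addr`; the kernel's ipv6_addr_any()/ipv6_addr_equal() helpers are swapped for `IN6_IS_ADDR_UNSPECIFIED()` and `memcmp()`:

/* Sketch: :: on the socket side is a wildcard, otherwise require equality. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool addr_match(const struct in6_addr *bound,
		       const struct in6_addr *pkt)
{
	return IN6_IS_ADDR_UNSPECIFIED(bound) ||
	       memcmp(bound, pkt, sizeof(*bound)) == 0;
}

int main(void)
{
	struct in6_addr any = IN6ADDR_ANY_INIT, peer, other;

	inet_pton(AF_INET6, "2001:db8::1", &peer);
	inet_pton(AF_INET6, "2001:db8::2", &other);

	printf("wildcard: %d\n", addr_match(&any, &peer));   /* 1 */
	printf("equal:    %d\n", addr_match(&peer, &peer));  /* 1 */
	printf("mismatch: %d\n", addr_match(&peer, &other)); /* 0 */
	return 0;
}
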
@@ -83,17 +86,6 @@ found:
 	return sk;
 }
 
-static inline struct sock *l2tp_ip6_bind_lookup(struct net *net,
-						struct in6_addr *laddr,
-						int dif, u32 tunnel_id)
-{
-	struct sock *sk = __l2tp_ip6_bind_lookup(net, laddr, dif, tunnel_id);
-	if (sk)
-		sock_hold(sk);
-
-	return sk;
-}
-
 /* When processing receive frames, there are two cases to
  * consider. Data frames consist of a non-zero session-id and an
  * optional cookie. Control frames consist of a regular L2TP header
@@ -197,8 +189,8 @@ pass_up:
 	struct ipv6hdr *iph = ipv6_hdr(skb);
 
 	read_lock_bh(&l2tp_ip6_lock);
-	sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, inet6_iif(skb),
-				    tunnel_id);
+	sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr,
+				    inet6_iif(skb), tunnel_id);
 	if (!sk) {
 		read_unlock_bh(&l2tp_ip6_lock);
 		goto discard;
@@ -330,7 +322,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
 	rcu_read_unlock();
 
 	write_lock_bh(&l2tp_ip6_lock);
-	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, bound_dev_if,
+	if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if,
 				   addr->l2tp_conn_id)) {
 		write_unlock_bh(&l2tp_ip6_lock);
 		err = -EADDRINUSE;
@@ -3287,7 +3287,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
 	int extra_head = fast_tx->hdr_len - (ETH_HLEN - 2);
 	int hw_headroom = sdata->local->hw.extra_tx_headroom;
 	struct ethhdr eth;
-	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+	struct ieee80211_tx_info *info;
 	struct ieee80211_hdr *hdr = (void *)fast_tx->hdr;
 	struct ieee80211_tx_data tx;
 	ieee80211_tx_result r;
@@ -3351,6 +3351,7 @@ static bool ieee80211_xmit_fast(struct ieee80211_sub_if_data *sdata,
 	memcpy(skb->data + fast_tx->da_offs, eth.h_dest, ETH_ALEN);
 	memcpy(skb->data + fast_tx->sa_offs, eth.h_source, ETH_ALEN);
 
+	info = IEEE80211_SKB_CB(skb);
 	memset(info, 0, sizeof(*info));
 	info->band = fast_tx->band;
 	info->control.vif = &sdata->vif;
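These two mac80211 hunks (ieee80211_xmit_fast()) move the `IEEE80211_SKB_CB()` call to after the headroom handling. The tx_info lives inside the skb's control buffer, so a pointer taken from the original skb dangles if the skb is replaced before transmission, presumably by the clone/expand logic in the elided lines between the hunks. A hypothetical userspace model of the hazard and the fix; `struct pkt` and `grow_headroom()` stand in for the skb and its reallocation:

/* Sketch: taking a pointer into a buffer before it may move is a bug. */
#include <stdlib.h>
#include <string.h>

struct pkt { char cb[48]; /* control-buffer area, like skb->cb */ };

static struct pkt *grow_headroom(struct pkt *p)
{
	/* May move the allocation, the way skb cloning/expansion can. */
	struct pkt *np = realloc(p, sizeof(*p) + 256);
	return np ? np : p;
}

int main(void)
{
	struct pkt *p = calloc(1, sizeof(*p));
	char *info_early = p->cb;   /* old code: pointer taken too soon */

	p = grow_headroom(p);       /* the buffer may have moved... */
	(void)info_early;           /* ...so using info_early now would be
				     * a use-after-free */

	char *info = p->cb;         /* fixed code: fetch after the final skb */
	memset(info, 0, sizeof(p->cb));
	free(p);
	return 0;
}
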
@@ -153,10 +153,14 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
 
 		switch (ip_tunnel_info_af(info)) {
 		case AF_INET:
+			skb_key.enc_control.addr_type =
+				FLOW_DISSECTOR_KEY_IPV4_ADDRS;
 			skb_key.enc_ipv4.src = key->u.ipv4.src;
 			skb_key.enc_ipv4.dst = key->u.ipv4.dst;
 			break;
 		case AF_INET6:
+			skb_key.enc_control.addr_type =
+				FLOW_DISSECTOR_KEY_IPV6_ADDRS;
 			skb_key.enc_ipv6.src = key->u.ipv6.src;
 			skb_key.enc_ipv6.dst = key->u.ipv6.dst;
 			break;
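In fl_classify() (net/sched/cls_flower.c), consumers of flow-dissector keys dispatch on `enc_control.addr_type`; filling in `enc_ipv4`/`enc_ipv6` without it means the tunnel addresses are never compared, so flower filters matching on encapsulation silently fail. A sketch of that dispatch-on-tag pattern with simplified stand-in enums and structs, not the kernel's:

/* Sketch: address fields are only consulted once addr_type says which. */
#include <stdint.h>
#include <stdio.h>

enum addr_type { ADDR_UNSPEC = 0, ADDR_IPV4, ADDR_IPV6 };

struct enc_key {
	enum addr_type addr_type;  /* like enc_control.addr_type */
	uint32_t v4_src, v4_dst;   /* like enc_ipv4 */
};

static void match(const struct enc_key *k)
{
	switch (k->addr_type) {
	case ADDR_IPV4:
		printf("matching on %08x -> %08x\n",
		       (unsigned)k->v4_src, (unsigned)k->v4_dst);
		break;
	case ADDR_IPV6:
		printf("matching on IPv6 addresses\n");
		break;
	default:
		printf("addr_type unset: tunnel addresses ignored\n");
	}
}

int main(void)
{
	struct enc_key k = { .v4_src = 0x0a000001, .v4_dst = 0x0a000002 };

	match(&k);                 /* addr_type forgotten: addresses ignored */
	k.addr_type = ADDR_IPV4;
	match(&k);                 /* with addr_type set, addresses are used */
	return 0;
}
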
Some files were not shown because too many files changed in this diff.