Merge branch 'master' of ssh://master.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into perf/urgent
Commit 4d70230bb4
@@ -66,10 +66,10 @@ trick is to ensure that any needed memory allocations are done before
entering atomic context, using:

int flex_array_prealloc(struct flex_array *array, unsigned int start,
unsigned int end, gfp_t flags);
unsigned int nr_elements, gfp_t flags);

This function will ensure that memory for the elements indexed in the range
defined by start and end has been allocated. Thereafter, a
defined by start and nr_elements has been allocated. Thereafter, a
flex_array_put() call on an element in that range is guaranteed not to
block.
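For reference, a minimal usage sketch of the interface documented above (a hypothetical caller, not part of this commit): preallocate while sleeping is still allowed, then store from atomic context.

```c
/* Hypothetical example, not from this commit: preallocate so that a
 * later flex_array_put() in atomic context cannot block. */
#include <linux/flex_array.h>
#include <linux/gfp.h>

static int example_prefill(struct flex_array *fa, void *elem)
{
	int err;

	/* Done in process context: back elements 0..15 with real pages. */
	err = flex_array_prealloc(fa, 0, 16, GFP_KERNEL);
	if (err)
		return err;

	/* Safe in atomic context: the page for element 3 already exists. */
	return flex_array_put(fa, 3, elem, GFP_ATOMIC);
}
```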
Makefile | 2

@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 39
EXTRAVERSION = -rc5
EXTRAVERSION = -rc6
NAME = Flesh-Eating Bats with Fangs

# *DOCUMENTATION*
@@ -0,0 +1,48 @@
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EMBEDDED=y
# CONFIG_HOTPLUG is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
# CONFIG_TIMERFD is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_MMU is not set
CONFIG_ARCH_AT91=y
CONFIG_ARCH_AT91X40=y
CONFIG_MACH_AT91EB01=y
CONFIG_AT91_EARLY_USART0=y
CONFIG_CPU_ARM7TDMI=y
CONFIG_SET_MEM_PARAM=y
CONFIG_DRAM_BASE=0x01000000
CONFIG_DRAM_SIZE=0x00400000
CONFIG_FLASH_MEM_BASE=0x01400000
CONFIG_PROCESSOR_ID=0x14000040
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_BINFMT_FLAT=y
# CONFIG_SUSPEND is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y
CONFIG_MTD_ROM=y
CONFIG_BLK_DEV_RAM=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
# CONFIG_DNOTIFY is not set
CONFIG_ROMFS_FS=y
# CONFIG_ENABLE_MUST_CHECK is not set
@@ -83,6 +83,7 @@ config ARCH_AT91CAP9
select CPU_ARM926T
select GENERIC_CLOCKEVENTS
select HAVE_FB_ATMEL
select HAVE_NET_MACB

config ARCH_AT572D940HF
bool "AT572D940HF"
@@ -30,6 +30,11 @@
#include <mach/board.h>
#include "generic.h"

static void __init at91eb01_init_irq(void)
{
at91x40_init_interrupts(NULL);
}

static void __init at91eb01_map_io(void)
{
at91x40_initialize(40000000);

@@ -38,7 +43,7 @@ static void __init at91eb01_map_io(void)
MACHINE_START(AT91EB01, "Atmel AT91 EB01")
/* Maintainer: Greg Ungerer <gerg@snapgear.com> */
.timer = &at91x40_timer,
.init_irq = at91x40_init_interrupts,
.init_irq = at91eb01_init_irq,
.map_io = at91eb01_map_io,
MACHINE_END
@@ -27,6 +27,7 @@
#define ARCH_ID_AT91SAM9G45 0x819b05a0
#define ARCH_ID_AT91SAM9G45MRL 0x819b05a2 /* aka 9G45-ES2 & non ES lots */
#define ARCH_ID_AT91SAM9G45ES 0x819b05a1 /* 9G45-ES (Engineering Sample) */
#define ARCH_ID_AT91SAM9X5 0x819a05a0
#define ARCH_ID_AT91CAP9 0x039A03A0

#define ARCH_ID_AT91SAM9XE128 0x329973a0

@@ -55,6 +56,12 @@ static inline unsigned long at91_cpu_fully_identify(void)
#define ARCH_EXID_AT91SAM9G46 0x00000003
#define ARCH_EXID_AT91SAM9G45 0x00000004

#define ARCH_EXID_AT91SAM9G15 0x00000000
#define ARCH_EXID_AT91SAM9G35 0x00000001
#define ARCH_EXID_AT91SAM9X35 0x00000002
#define ARCH_EXID_AT91SAM9G25 0x00000003
#define ARCH_EXID_AT91SAM9X25 0x00000004

static inline unsigned long at91_exid_identify(void)
{
return at91_sys_read(AT91_DBGU_EXID);

@@ -143,6 +150,27 @@ static inline unsigned long at91cap9_rev_identify(void)
#define cpu_is_at91sam9m11() (0)
#endif

#ifdef CONFIG_ARCH_AT91SAM9X5
#define cpu_is_at91sam9x5() (at91_cpu_identify() == ARCH_ID_AT91SAM9X5)
#define cpu_is_at91sam9g15() (cpu_is_at91sam9x5() && \
(at91_exid_identify() == ARCH_EXID_AT91SAM9G15))
#define cpu_is_at91sam9g35() (cpu_is_at91sam9x5() && \
(at91_exid_identify() == ARCH_EXID_AT91SAM9G35))
#define cpu_is_at91sam9x35() (cpu_is_at91sam9x5() && \
(at91_exid_identify() == ARCH_EXID_AT91SAM9X35))
#define cpu_is_at91sam9g25() (cpu_is_at91sam9x5() && \
(at91_exid_identify() == ARCH_EXID_AT91SAM9G25))
#define cpu_is_at91sam9x25() (cpu_is_at91sam9x5() && \
(at91_exid_identify() == ARCH_EXID_AT91SAM9X25))
#else
#define cpu_is_at91sam9x5() (0)
#define cpu_is_at91sam9g15() (0)
#define cpu_is_at91sam9g35() (0)
#define cpu_is_at91sam9x35() (0)
#define cpu_is_at91sam9g25() (0)
#define cpu_is_at91sam9x25() (0)
#endif

#ifdef CONFIG_ARCH_AT91CAP9
#define cpu_is_at91cap9() (at91_cpu_identify() == ARCH_ID_AT91CAP9)
#define cpu_is_at91cap9_revB() (at91cap9_rev_identify() == ARCH_REVISION_CAP9_B)
@@ -698,7 +698,7 @@ cpu_dev_register(amd_cpu_dev);
 */

const int amd_erratum_400[] =
AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x0f, 0x4, 0x2, 0xff, 0xf),
AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_400);
@@ -21,26 +21,26 @@ r_base = .
/* Get our own relocated address */
call 1f
1: popl %ebx
subl $1b, %ebx
subl $(1b - r_base), %ebx

/* Compute the equivalent real-mode segment */
movl %ebx, %ecx
shrl $4, %ecx

/* Patch post-real-mode segment jump */
movw dispatch_table(%ebx,%eax,2),%ax
movw %ax, 101f(%ebx)
movw %cx, 102f(%ebx)
movw (dispatch_table - r_base)(%ebx,%eax,2),%ax
movw %ax, (101f - r_base)(%ebx)
movw %cx, (102f - r_base)(%ebx)

/* Set up the IDT for real mode. */
lidtl machine_real_restart_idt(%ebx)
lidtl (machine_real_restart_idt - r_base)(%ebx)

/*
 * Set up a GDT from which we can load segment descriptors for real
 * mode. The GDT is not used in real mode; it is just needed here to
 * prepare the descriptors.
 */
lgdtl machine_real_restart_gdt(%ebx)
lgdtl (machine_real_restart_gdt - r_base)(%ebx)

/*
 * Load the data segment registers with 16-bit compatible values
@@ -306,7 +306,7 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
bi->end = min(bi->end, high);

/* and there's no empty block */
if (bi->start == bi->end) {
if (bi->start >= bi->end) {
numa_remove_memblk_from(i--, mi);
continue;
}
@@ -1463,6 +1463,119 @@ static int xen_pgd_alloc(struct mm_struct *mm)
return ret;
}

#ifdef CONFIG_X86_64
static __initdata u64 __last_pgt_set_rw = 0;
static __initdata u64 __pgt_buf_start = 0;
static __initdata u64 __pgt_buf_end = 0;
static __initdata u64 __pgt_buf_top = 0;
/*
 * As a consequence of the commit:
 *
 * commit 4b239f458c229de044d6905c2b0f9fe16ed9e01e
 * Author: Yinghai Lu <yinghai@kernel.org>
 * Date: Fri Dec 17 16:58:28 2010 -0800
 *
 * x86-64, mm: Put early page table high
 *
 * at some point init_memory_mapping is going to reach the pagetable pages
 * area and map those pages too (mapping them as normal memory that falls
 * in the range of addresses passed to init_memory_mapping as argument).
 * Some of those pages are already pagetable pages (they are in the range
 * pgt_buf_start-pgt_buf_end) therefore they are going to be mapped RO and
 * everything is fine.
 * Some of these pages are not pagetable pages yet (they fall in the range
 * pgt_buf_end-pgt_buf_top; for example the page at pgt_buf_end) so they
 * are going to be mapped RW. When these pages become pagetable pages and
 * are hooked into the pagetable, xen will find that the guest has already
 * a RW mapping of them somewhere and fail the operation.
 * The reason Xen requires pagetables to be RO is that the hypervisor needs
 * to verify that the pagetables are valid before using them. The validation
 * operations are called "pinning".
 *
 * In order to fix the issue we mark all the pages in the entire range
 * pgt_buf_start-pgt_buf_top as RO, however when the pagetable allocation
 * is completed only the range pgt_buf_start-pgt_buf_end is reserved by
 * init_memory_mapping. Hence the kernel is going to crash as soon as one
 * of the pages in the range pgt_buf_end-pgt_buf_top is reused (b/c those
 * ranges are RO).
 *
 * For this reason, 'mark_rw_past_pgt' is introduced which is called _after_
 * the init_memory_mapping has completed (in a perfect world we would
 * call this function from init_memory_mapping, but lets ignore that).
 *
 * Because we are called _after_ init_memory_mapping the pgt_buf_[start,
 * end,top] have all changed to new values (b/c init_memory_mapping
 * is called and setting up another new page-table). Hence, the first time
 * we enter this function, we save away the pgt_buf_start value and update
 * the pgt_buf_[end,top].
 *
 * When we detect that the "old" pgt_buf_start through pgt_buf_end
 * PFNs have been reserved (so memblock_x86_reserve_range has been called),
 * we immediately set out to RW the "old" pgt_buf_end through pgt_buf_top.
 *
 * And then we update those "old" pgt_buf_[end|top] with the new ones
 * so that we can redo this on the next pagetable.
 */
static __init void mark_rw_past_pgt(void) {

if (pgt_buf_end > pgt_buf_start) {
u64 addr, size;

/* Save it away. */
if (!__pgt_buf_start) {
__pgt_buf_start = pgt_buf_start;
__pgt_buf_end = pgt_buf_end;
__pgt_buf_top = pgt_buf_top;
return;
}
/* If we get the range that starts at __pgt_buf_end that means
 * the range is reserved, and that in 'init_memory_mapping'
 * the 'memblock_x86_reserve_range' has been called with the
 * outdated __pgt_buf_start, __pgt_buf_end (the "new"
 * pgt_buf_[start|end|top] refer now to a new pagetable.
 * Note: we are called _after_ the pgt_buf_[..] have been
 * updated.*/

addr = memblock_x86_find_in_range_size(PFN_PHYS(__pgt_buf_start),
&size, PAGE_SIZE);

/* Still not reserved, meaning 'memblock_x86_reserve_range'
 * hasn't been called yet. Update the _end and _top.*/
if (addr == PFN_PHYS(__pgt_buf_start)) {
__pgt_buf_end = pgt_buf_end;
__pgt_buf_top = pgt_buf_top;
return;
}

/* OK, the area is reserved, meaning it is time for us to
 * set RW for the old end->top PFNs. */

/* ..unless we had already done this. */
if (__pgt_buf_end == __last_pgt_set_rw)
return;

addr = PFN_PHYS(__pgt_buf_end);

/* set as RW the rest */
printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n",
PFN_PHYS(__pgt_buf_end), PFN_PHYS(__pgt_buf_top));

while (addr < PFN_PHYS(__pgt_buf_top)) {
make_lowmem_page_readwrite(__va(addr));
addr += PAGE_SIZE;
}
/* And update everything so that we are ready for the next
 * pagetable (the one created for regions past 4GB) */
__last_pgt_set_rw = __pgt_buf_end;
__pgt_buf_start = pgt_buf_start;
__pgt_buf_end = pgt_buf_end;
__pgt_buf_top = pgt_buf_top;
}
return;
}
#else
static __init void mark_rw_past_pgt(void) { }
#endif
static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64

@@ -1488,6 +1601,14 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
unsigned long pfn = pte_pfn(pte);

/*
 * A bit of optimization. We do not need to call the workaround
 * when xen_set_pte_init is called with a PTE with 0 as PFN.
 * That is b/c the pagetable at that point are just being populated
 * with empty values and we can save some cycles by not calling
 * the 'memblock' code.*/
if (pfn)
mark_rw_past_pgt();
/*
 * If the new pfn is within the range of the newly allocated
 * kernel pagetable, and it isn't being mapped into an

@@ -1495,7 +1616,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
 * it is RO.
 */
if (((!is_early_ioremap_ptep(ptep) &&
pfn >= pgt_buf_start && pfn < pgt_buf_end)) ||
pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
(is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
pte = pte_wrprotect(pte);

@@ -1997,6 +2118,8 @@ __init void xen_ident_map_ISA(void)

static __init void xen_post_allocator_init(void)
{
mark_rw_past_pgt();

#ifdef CONFIG_XEN_DEBUG
pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
#endif
@@ -777,9 +777,9 @@ static int rbd_do_request(struct request *rq,
ops,
false,
GFP_NOIO, pages, bio);
if (IS_ERR(req)) {
if (!req) {
up_read(&header->snap_rwsem);
ret = PTR_ERR(req);
ret = -ENOMEM;
goto done_pages;
}
@@ -2199,7 +2199,6 @@ static int ohci_set_config_rom(struct fw_card *card,
{
struct fw_ohci *ohci;
unsigned long flags;
int ret = -EBUSY;
__be32 *next_config_rom;
dma_addr_t uninitialized_var(next_config_rom_bus);

@@ -2240,22 +2239,37 @@ static int ohci_set_config_rom(struct fw_card *card,

spin_lock_irqsave(&ohci->lock, flags);

/*
 * If there is not an already pending config_rom update,
 * push our new allocation into the ohci->next_config_rom
 * and then mark the local variable as null so that we
 * won't deallocate the new buffer.
 *
 * OTOH, if there is a pending config_rom update, just
 * use that buffer with the new config_rom data, and
 * let this routine free the unused DMA allocation.
 */

if (ohci->next_config_rom == NULL) {
ohci->next_config_rom = next_config_rom;
ohci->next_config_rom_bus = next_config_rom_bus;
next_config_rom = NULL;
}

copy_config_rom(ohci->next_config_rom, config_rom, length);

ohci->next_header = config_rom[0];
ohci->next_config_rom[0] = 0;

reg_write(ohci, OHCI1394_ConfigROMmap,
ohci->next_config_rom_bus);
ret = 0;
}
reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

spin_unlock_irqrestore(&ohci->lock, flags);

/* If we didn't use the DMA allocation, delete it. */
if (next_config_rom != NULL)
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
next_config_rom, next_config_rom_bus);

/*
 * Now initiate a bus reset to have the changes take
 * effect. We clean up the old config rom memory and DMA

@@ -2263,13 +2277,10 @@ static int ohci_set_config_rom(struct fw_card *card,
 * controller could need to access it before the bus reset
 * takes effect.
 */
if (ret == 0)
fw_schedule_bus_reset(&ohci->card, true, true);
else
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
next_config_rom, next_config_rom_bus);

return ret;
fw_schedule_bus_reset(&ohci->card, true, true);

return 0;
}

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
@@ -932,11 +932,34 @@ EXPORT_SYMBOL(drm_vblank_put);

void drm_vblank_off(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long irqflags;
unsigned int seq;

spin_lock_irqsave(&dev->vbl_lock, irqflags);
vblank_disable_and_save(dev, crtc);
DRM_WAKEUP(&dev->vbl_queue[crtc]);

/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
DRM_DEBUG("Sending premature vblank event on disable: \
wanted %d, current %d\n",
e->event.sequence, seq);

e->event.sequence = seq;
e->event.tv_sec = now.tv_sec;
e->event.tv_usec = now.tv_usec;
drm_vblank_put(dev, e->pipe);
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
e->event.sequence);
}

spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_off);
@@ -862,9 +862,15 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
if (rdev->flags & RADEON_IS_IGP) {
WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
} else {
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
}
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);

@@ -2923,11 +2929,6 @@ static int evergreen_startup(struct radeon_device *rdev)
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
/* XXX: ontario has problems blitting to gart at the moment */
if (rdev->family == CHIP_PALM) {
rdev->asic->copy = NULL;
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
}

/* allocate wb buffer */
r = radeon_wb_init(rdev);
@@ -221,6 +221,11 @@
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C

#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
#define FUS_MC_VM_MD_L1_TLB2_CNTL 0x2664

#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
@@ -1599,9 +1599,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
fake_edid_record->ucFakeEDIDLength);

if (drm_edid_is_valid(edid))
if (drm_edid_is_valid(edid)) {
rdev->mode_info.bios_hardcoded_edid = edid;
else
rdev->mode_info.bios_hardcoded_edid_size = edid_size;
} else
kfree(edid);
}
}
@@ -234,6 +234,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
case RADEON_INFO_FUSION_GART_WORKING:
value = 1;
break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
@@ -68,8 +68,23 @@ struct wm831x_ts {
unsigned int pd_irq;
bool pressure;
bool pen_down;
struct work_struct pd_data_work;
};

static void wm831x_pd_data_work(struct work_struct *work)
{
struct wm831x_ts *wm831x_ts =
container_of(work, struct wm831x_ts, pd_data_work);

if (wm831x_ts->pen_down) {
enable_irq(wm831x_ts->data_irq);
dev_dbg(wm831x_ts->wm831x->dev, "IRQ PD->DATA done\n");
} else {
enable_irq(wm831x_ts->pd_irq);
dev_dbg(wm831x_ts->wm831x->dev, "IRQ DATA->PD done\n");
}
}

static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data)
{
struct wm831x_ts *wm831x_ts = irq_data;

@@ -110,6 +125,9 @@ static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data)
}

if (!wm831x_ts->pen_down) {
/* Switch from data to pen down */
dev_dbg(wm831x->dev, "IRQ DATA->PD\n");

disable_irq_nosync(wm831x_ts->data_irq);

/* Don't need data any more */

@@ -128,6 +146,10 @@ static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data)
ABS_PRESSURE, 0);

input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 0);

schedule_work(&wm831x_ts->pd_data_work);
} else {
input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1);
}

input_sync(wm831x_ts->input_dev);

@@ -141,6 +163,11 @@ static irqreturn_t wm831x_ts_pen_down_irq(int irq, void *irq_data)
struct wm831x *wm831x = wm831x_ts->wm831x;
int ena = 0;

if (wm831x_ts->pen_down)
return IRQ_HANDLED;

disable_irq_nosync(wm831x_ts->pd_irq);

/* Start collecting data */
if (wm831x_ts->pressure)
ena |= WM831X_TCH_Z_ENA;

@@ -149,14 +176,14 @@ static irqreturn_t wm831x_ts_pen_down_irq(int irq, void *irq_data)
WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA,
WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | ena);

input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1);
input_sync(wm831x_ts->input_dev);

wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1,
WM831X_TCHPD_EINT, WM831X_TCHPD_EINT);

wm831x_ts->pen_down = true;
enable_irq(wm831x_ts->data_irq);

/* Switch from pen down to data */
dev_dbg(wm831x->dev, "IRQ PD->DATA\n");
schedule_work(&wm831x_ts->pd_data_work);

return IRQ_HANDLED;
}

@@ -182,13 +209,28 @@ static void wm831x_ts_input_close(struct input_dev *idev)
struct wm831x_ts *wm831x_ts = input_get_drvdata(idev);
struct wm831x *wm831x = wm831x_ts->wm831x;

/* Shut the controller down, disabling all other functionality too */
wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
WM831X_TCH_ENA | WM831X_TCH_CVT_ENA |
WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA |
WM831X_TCH_Z_ENA, 0);
WM831X_TCH_ENA | WM831X_TCH_X_ENA |
WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA, 0);

if (wm831x_ts->pen_down)
/* Make sure any pending IRQs are done, the above will prevent
 * new ones firing.
 */
synchronize_irq(wm831x_ts->data_irq);
synchronize_irq(wm831x_ts->pd_irq);

/* Make sure the IRQ completion work is quiesced */
flush_work_sync(&wm831x_ts->pd_data_work);

/* If we ended up with the pen down then make sure we revert back
 * to pen detection state for the next time we start up.
 */
if (wm831x_ts->pen_down) {
disable_irq(wm831x_ts->data_irq);
enable_irq(wm831x_ts->pd_irq);
wm831x_ts->pen_down = false;
}
}

static __devinit int wm831x_ts_probe(struct platform_device *pdev)

@@ -198,7 +240,7 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
struct wm831x_pdata *core_pdata = dev_get_platdata(pdev->dev.parent);
struct wm831x_touch_pdata *pdata = NULL;
struct input_dev *input_dev;
int error;
int error, irqf;

if (core_pdata)
pdata = core_pdata->touch;

@@ -212,6 +254,7 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)

wm831x_ts->wm831x = wm831x;
wm831x_ts->input_dev = input_dev;
INIT_WORK(&wm831x_ts->pd_data_work, wm831x_pd_data_work);

/*
 * If we have a direct IRQ use it, otherwise use the interrupt

@@ -270,9 +313,14 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
WM831X_TCH_RATE_MASK, 6);

if (pdata && pdata->data_irqf)
irqf = pdata->data_irqf;
else
irqf = IRQF_TRIGGER_HIGH;

error = request_threaded_irq(wm831x_ts->data_irq,
NULL, wm831x_ts_data_irq,
IRQF_ONESHOT,
irqf | IRQF_ONESHOT,
"Touchscreen data", wm831x_ts);
if (error) {
dev_err(&pdev->dev, "Failed to request data IRQ %d: %d\n",

@@ -281,9 +329,14 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
}
disable_irq(wm831x_ts->data_irq);

if (pdata && pdata->pd_irqf)
irqf = pdata->pd_irqf;
else
irqf = IRQF_TRIGGER_HIGH;

error = request_threaded_irq(wm831x_ts->pd_irq,
NULL, wm831x_ts_pen_down_irq,
IRQF_ONESHOT,
irqf | IRQF_ONESHOT,
"Touchscreen pen down", wm831x_ts);
if (error) {
dev_err(&pdev->dev, "Failed to request pen down IRQ %d: %d\n",
@@ -356,6 +356,8 @@ config DVB_USB_LME2510
select DVB_TDA826X if !DVB_FE_CUSTOMISE
select DVB_STV0288 if !DVB_FE_CUSTOMISE
select DVB_IX2505V if !DVB_FE_CUSTOMISE
select DVB_STV0299 if !DVB_FE_CUSTOMISE
select DVB_PLL if !DVB_FE_CUSTOMISE
help
Say Y here to support the LME DM04/QQBOX DVB-S USB2.0 .
@@ -1520,6 +1520,7 @@ static int init_channel(struct ngene_channel *chan)
if (dev->ci.en && (io & NGENE_IO_TSOUT)) {
dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
set_transfer(chan, 1);
chan->dev->channel[2].DataFormatFlags = DF_SWAP32;
set_transfer(&chan->dev->channel[2], 1);
dvb_register_device(adapter, &chan->ci_dev,
&ngene_dvbdev_ci, (void *) chan,
@@ -376,7 +376,7 @@ static int __devinit saa7706h_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);

state = kmalloc(sizeof(struct saa7706h_state), GFP_KERNEL);
state = kzalloc(sizeof(struct saa7706h_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
sd = &state->sd;
@@ -176,7 +176,7 @@ static int __devinit tef6862_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);

state = kmalloc(sizeof(struct tef6862_state), GFP_KERNEL);
state = kzalloc(sizeof(struct tef6862_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
state->freq = TEF6862_LO_FREQ;
@@ -46,7 +46,7 @@
#define MOD_AUTHOR "Jarod Wilson <jarod@wilsonet.com>"
#define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display"
#define MOD_NAME "imon"
#define MOD_VERSION "0.9.2"
#define MOD_VERSION "0.9.3"

#define DISPLAY_MINOR_BASE 144
#define DEVICE_NAME "lcd%d"

@@ -460,8 +460,9 @@ static int display_close(struct inode *inode, struct file *file)
}

/**
 * Sends a packet to the device -- this function must be called
 * with ictx->lock held.
 * Sends a packet to the device -- this function must be called with
 * ictx->lock held, or its unlock/lock sequence while waiting for tx
 * to complete can/will lead to a deadlock.
 */
static int send_packet(struct imon_context *ictx)
{

@@ -991,12 +992,21 @@ static void imon_touch_display_timeout(unsigned long data)
 * the iMON remotes, and those used by the Windows MCE remotes (which is
 * really just RC-6), but only one or the other at a time, as the signals
 * are decoded onboard the receiver.
 *
 * This function gets called two different ways, one way is from
 * rc_register_device, for initial protocol selection/setup, and the other is
 * via a userspace-initiated protocol change request, either by direct sysfs
 * prodding or by something like ir-keytable. In the rc_register_device case,
 * the imon context lock is already held, but when initiated from userspace,
 * it is not, so we must acquire it prior to calling send_packet, which
 * requires that the lock is held.
 */
static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
{
int retval;
struct imon_context *ictx = rc->priv;
struct device *dev = ictx->dev;
bool unlock = false;
unsigned char ir_proto_packet[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };

@@ -1029,6 +1039,11 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)

memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));

if (!mutex_is_locked(&ictx->lock)) {
unlock = true;
mutex_lock(&ictx->lock);
}

retval = send_packet(ictx);
if (retval)
goto out;

@@ -1037,6 +1052,9 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
ictx->pad_mouse = false;

out:
if (unlock)
mutex_unlock(&ictx->lock);

return retval;
}

@@ -2134,6 +2152,7 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
goto rdev_setup_failed;
}

mutex_unlock(&ictx->lock);
return ictx;

rdev_setup_failed:

@@ -2205,6 +2224,7 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
goto urb_submit_failed;
}

mutex_unlock(&ictx->lock);
return ictx;

urb_submit_failed:

@@ -2299,6 +2319,8 @@ static int __devinit imon_probe(struct usb_interface *interface,
usb_set_intfdata(interface, ictx);

if (ifnum == 0) {
mutex_lock(&ictx->lock);

if (product == 0xffdc && ictx->rf_device) {
sysfs_err = sysfs_create_group(&interface->dev.kobj,
&imon_rf_attr_group);

@@ -2309,13 +2331,14 @@ static int __devinit imon_probe(struct usb_interface *interface,

if (ictx->display_supported)
imon_init_display(ictx, interface);

mutex_unlock(&ictx->lock);
}

dev_info(dev, "iMON device (%04x:%04x, intf%d) on "
"usb<%d:%d> initialized\n", vendor, product, ifnum,
usbdev->bus->busnum, usbdev->devnum);

mutex_unlock(&ictx->lock);
mutex_unlock(&driver_lock);

return 0;
@@ -36,6 +36,7 @@
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/bitops.h>
@@ -220,6 +220,8 @@ static struct usb_device_id mceusb_dev_table[] = {
{ USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
/* Philips/Spinel plus IR transceiver for ASUS */
{ USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
/* Philips IR transceiver (Dell branded) */
{ USB_DEVICE(VENDOR_PHILIPS, 0x2093) },
/* Realtek MCE IR Receiver and card reader */
{ USB_DEVICE(VENDOR_REALTEK, 0x0161),
.driver_info = MULTIFUNCTION },
@@ -707,6 +707,7 @@ static void ir_close(struct input_dev *idev)
{
struct rc_dev *rdev = input_get_drvdata(idev);

if (rdev)
rdev->close(rdev);
}

@@ -733,6 +734,7 @@ static struct {
{ RC_TYPE_SONY, "sony" },
{ RC_TYPE_RC5_SZ, "rc-5-sz" },
{ RC_TYPE_LIRC, "lirc" },
{ RC_TYPE_OTHER, "other" },
};

#define PROTO_NONE "none"
@@ -174,7 +174,7 @@ static int m52790_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);

state = kmalloc(sizeof(struct m52790_state), GFP_KERNEL);
state = kzalloc(sizeof(struct m52790_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
@@ -171,7 +171,7 @@ static int tda9840_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);

sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
if (sd == NULL)
return -ENOMEM;
v4l2_i2c_subdev_init(sd, client, &tda9840_ops);
@@ -152,7 +152,7 @@ static int tea6415c_probe(struct i2c_client *client,

v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
if (sd == NULL)
return -ENOMEM;
v4l2_i2c_subdev_init(sd, client, &tea6415c_ops);
@@ -125,7 +125,7 @@ static int tea6420_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);

sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
if (sd == NULL)
return -ENOMEM;
v4l2_i2c_subdev_init(sd, client, &tea6420_ops);
@@ -230,7 +230,7 @@ static int upd64031a_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);

state = kmalloc(sizeof(struct upd64031a_state), GFP_KERNEL);
state = kzalloc(sizeof(struct upd64031a_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
sd = &state->sd;
@@ -202,7 +202,7 @@ static int upd64083_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);

state = kmalloc(sizeof(struct upd64083_state), GFP_KERNEL);
state = kzalloc(sizeof(struct upd64083_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
sd = &state->sd;
@@ -25,7 +25,6 @@
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <plat/usb.h>

#define USBHS_DRIVER_NAME "usbhs-omap"

@@ -700,8 +699,7 @@ static int usbhs_enable(struct device *dev)
dev_dbg(dev, "starting TI HSUSB Controller\n");
if (!pdata) {
dev_dbg(dev, "missing platform_data\n");
ret = -ENODEV;
goto end_enable;
return -ENODEV;
}

spin_lock_irqsave(&omap->lock, flags);

@@ -915,7 +913,8 @@ static int usbhs_enable(struct device *dev)

end_count:
omap->count++;
goto end_enable;
spin_unlock_irqrestore(&omap->lock, flags);
return 0;

err_tll:
if (pdata->ehci_data->phy_reset) {

@@ -931,8 +930,6 @@ err_tll:
clk_disable(omap->usbhost_fs_fck);
clk_disable(omap->usbhost_hs_fck);
clk_disable(omap->usbhost_ick);

end_enable:
spin_unlock_irqrestore(&omap->lock, flags);
return ret;
}
@@ -284,6 +284,7 @@ int mmc_add_card(struct mmc_card *card)
type = "SD-combo";
if (mmc_card_blockaddr(card))
type = "SDHC-combo";
break;
default:
type = "?";
break;
@@ -94,7 +94,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
spin_unlock_irqrestore(&host->clk_lock, flags);
return;
}
mutex_lock(&host->clk_gate_mutex);
mmc_claim_host(host);
spin_lock_irqsave(&host->clk_lock, flags);
if (!host->clk_requests) {
spin_unlock_irqrestore(&host->clk_lock, flags);

@@ -104,7 +104,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
}
spin_unlock_irqrestore(&host->clk_lock, flags);
mutex_unlock(&host->clk_gate_mutex);
mmc_release_host(host);
}

/*

@@ -130,7 +130,7 @@ void mmc_host_clk_ungate(struct mmc_host *host)
{
unsigned long flags;

mutex_lock(&host->clk_gate_mutex);
mmc_claim_host(host);
spin_lock_irqsave(&host->clk_lock, flags);
if (host->clk_gated) {
spin_unlock_irqrestore(&host->clk_lock, flags);

@@ -140,7 +140,7 @@ void mmc_host_clk_ungate(struct mmc_host *host)
}
host->clk_requests++;
spin_unlock_irqrestore(&host->clk_lock, flags);
mutex_unlock(&host->clk_gate_mutex);
mmc_release_host(host);
}

/**

@@ -215,7 +215,6 @@ static inline void mmc_host_clk_init(struct mmc_host *host)
host->clk_gated = false;
INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
spin_lock_init(&host->clk_lock);
mutex_init(&host->clk_gate_mutex);
}

/**
@@ -832,7 +832,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}

if (end_command)
if (end_command && host->cmd)
mmc_omap_cmd_done(host, host->cmd);
if (host->data != NULL) {
if (transfer_error)
@@ -957,6 +957,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
host->ioaddr = pci_ioremap_bar(pdev, bar);
if (!host->ioaddr) {
dev_err(&pdev->dev, "failed to remap registers\n");
ret = -ENOMEM;
goto release;
}
@@ -1334,6 +1334,13 @@ static void sdhci_tasklet_finish(unsigned long param)

host = (struct sdhci_host*)param;

/*
 * If this tasklet gets rescheduled while running, it will
 * be run again afterwards but without any active request.
 */
if (!host->mrq)
return;

spin_lock_irqsave(&host->lock, flags);

del_timer(&host->timer);

@@ -1345,7 +1352,7 @@ static void sdhci_tasklet_finish(unsigned long param)
 * upon error conditions.
 */
if (!(host->flags & SDHCI_DEVICE_DEAD) &&
(mrq->cmd->error ||
((mrq->cmd && mrq->cmd->error) ||
(mrq->data && (mrq->data->error ||
(mrq->data->stop && mrq->data->stop->error))) ||
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
@@ -728,15 +728,15 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
tmio_mmc_set_clock(host, ios->clock);

/* Power sequence - OFF -> UP -> ON */
if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
if (ios->power_mode == MMC_POWER_UP) {
/* power up SD bus */
if (host->set_pwr)
host->set_pwr(host->pdev, 1);
} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
/* power down SD bus */
if (ios->power_mode == MMC_POWER_OFF && host->set_pwr)
host->set_pwr(host->pdev, 0);
tmio_mmc_clk_stop(host);
} else if (ios->power_mode == MMC_POWER_UP) {
/* power up SD bus */
if (host->set_pwr)
host->set_pwr(host->pdev, 1);
} else {
/* start bus clock */
tmio_mmc_clk_start(host);
@@ -2288,7 +2288,3 @@ err_dev:
free_netdev(dev);
return NULL;
}

EXPORT_SYMBOL(init_ft1000_card);
EXPORT_SYMBOL(stop_ft1000_card);
EXPORT_SYMBOL(flarion_ft1000_cnt);
@@ -214,6 +214,3 @@ void ft1000CleanupProc(struct net_device *dev)
remove_proc_entry(FT1000_PROC, init_net.proc_net);
unregister_netdevice_notifier(&ft1000_netdev_notifier);
}

EXPORT_SYMBOL(ft1000InitProc);
EXPORT_SYMBOL(ft1000CleanupProc);
@@ -1,6 +1,6 @@
config DRM_PSB
tristate "Intel GMA500 KMS Framebuffer"
depends on DRM && PCI
depends on DRM && PCI && X86
select FB_CFB_COPYAREA
select FB_CFB_FILLRECT
select FB_CFB_IMAGEBLIT
@@ -28,6 +28,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <asm/mrst.h>
#include <sound/pcm.h>
@@ -29,6 +29,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/file.h>
#include "intel_sst.h"
#include "intelmid_snd_control.h"
@@ -12,6 +12,7 @@
 */
#include <linux/cs5535.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <asm/olpc.h>

#include "olpc_dcon.h"
@@ -28,7 +28,7 @@

#define RTSX_STOR "rts_pstor: "

#if CONFIG_RTS_PSTOR_DEBUG
#ifdef CONFIG_RTS_PSTOR_DEBUG
#define RTSX_DEBUGP(x...) printk(KERN_DEBUG RTSX_STOR x)
#define RTSX_DEBUGPN(x...) printk(KERN_DEBUG x)
#define RTSX_DEBUGPX(x...) printk(x)
@@ -23,6 +23,7 @@
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include "rtsx.h"
#include "rtsx_transport.h"
@@ -24,6 +24,7 @@
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/vmalloc.h>

#include "rtsx.h"
#include "rtsx_transport.h"

@@ -1311,7 +1312,7 @@ void rtsx_polling_func(struct rtsx_chip *chip)

#ifdef SUPPORT_OCP
if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
#if CONFIG_RTS_PSTOR_DEBUG
#ifdef CONFIG_RTS_PSTOR_DEBUG
if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER | MS_OC_NOW | MS_OC_EVER)) {
RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n", chip->ocp_stat);
}
@@ -23,6 +23,7 @@
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include "rtsx.h"
#include "rtsx_transport.h"
@@ -909,7 +909,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, 0);
} else {
#if CONFIG_RTS_PSTOR_DEBUG
#ifdef CONFIG_RTS_PSTOR_DEBUG
rtsx_read_register(chip, SD_VP_CTL, &val);
RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val);
rtsx_read_register(chip, SD_DCMPS_CTL, &val);

@@ -958,7 +958,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
return STATUS_SUCCESS;

Fail:
#if CONFIG_RTS_PSTOR_DEBUG
#ifdef CONFIG_RTS_PSTOR_DEBUG
rtsx_read_register(chip, SD_VP_CTL, &val);
RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val);
rtsx_read_register(chip, SD_DCMPS_CTL, &val);
@@ -82,7 +82,7 @@ do { \
#define TRACE_GOTO(chip, label) goto label
#endif

#if CONFIG_RTS_PSTOR_DEBUG
#ifdef CONFIG_RTS_PSTOR_DEBUG
static inline void rtsx_dump(u8 *buf, int buf_len)
{
int i;
@@ -23,6 +23,7 @@
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include "rtsx.h"
#include "rtsx_transport.h"
@@ -2,6 +2,7 @@ config SOLO6X10
tristate "Softlogic 6x10 MPEG codec cards"
depends on PCI && VIDEO_DEV && SND && I2C
select VIDEOBUF_DMA_SG
select SND_PCM
---help---
This driver supports the Softlogic based MPEG-4 and h.264 codec
codec cards.
@@ -876,7 +876,9 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
}

/* kill threads related to this sdev, if v.c. exists */
if (vdev->ud.tcp_rx)
kthread_stop(vdev->ud.tcp_rx);
if (vdev->ud.tcp_tx)
kthread_stop(vdev->ud.tcp_tx);

usbip_uinfo("stop threads\n");

@@ -949,9 +951,6 @@ static void vhci_device_init(struct vhci_device *vdev)
{
memset(vdev, 0, sizeof(*vdev));

vdev->ud.tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx");
vdev->ud.tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx");

vdev->ud.side = USBIP_VHCI;
vdev->ud.status = VDEV_ST_NULL;
/* vdev->ud.lock = SPIN_LOCK_UNLOCKED; */

@@ -1139,7 +1138,7 @@ static int vhci_hcd_probe(struct platform_device *pdev)
usbip_uerr("create hcd failed\n");
return -ENOMEM;
}

hcd->has_tt = 1;

/* this is private data for vhci_hcd */
the_controller = hcd_to_vhci(hcd);
@@ -21,6 +21,7 @@
#include "vhci.h"

#include <linux/in.h>
#include <linux/kthread.h>

/* TODO: refine locking ?*/

@@ -220,13 +221,13 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
vdev->ud.tcp_socket = socket;
vdev->ud.status = VDEV_ST_NOTASSIGNED;

wake_up_process(vdev->ud.tcp_rx);
wake_up_process(vdev->ud.tcp_tx);

spin_unlock(&vdev->ud.lock);
spin_unlock(&the_controller->lock);
/* end the lock */

vdev->ud.tcp_rx = kthread_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
vdev->ud.tcp_tx = kthread_run(vhci_tx_loop, &vdev->ud, "vhci_tx");

rh_port_connect(rhport, speed);

return count;
@@ -273,7 +273,7 @@ exit:
}

int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_index)
u8 key_index, bool unicast, bool multicast)
{
wlandevice_t *wlandev = dev->ml_priv;
@@ -40,6 +40,7 @@
#include <linux/slab.h>
#include <linux/usb/ulpi.h>
#include <plat/usb.h>
#include <linux/regulator/consumer.h>

/* EHCI Register Set */
#define EHCI_INSNREG04 (0xA0)

@@ -118,6 +119,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
struct ehci_hcd *omap_ehci;
int ret = -ENODEV;
int irq;
int i;
char supply[7];

if (usb_disabled())
return -ENODEV;

@@ -158,6 +161,23 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
hcd->rsrc_len = resource_size(res);
hcd->regs = regs;

/* get ehci regulator and enable */
for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY) {
pdata->regulator[i] = NULL;
continue;
}
snprintf(supply, sizeof(supply), "hsusb%d", i);
pdata->regulator[i] = regulator_get(dev, supply);
if (IS_ERR(pdata->regulator[i])) {
pdata->regulator[i] = NULL;
dev_dbg(dev,
"failed to get ehci port%d regulator\n", i);
} else {
regulator_enable(pdata->regulator[i]);
}
}

ret = omap_usbhs_enable(dev);
if (ret) {
dev_err(dev, "failed to start usbhs with err %d\n", ret);
@@ -1633,6 +1633,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
ints[i].qh = NULL;
ints[i].qtd = NULL;

urb->status = status;
isp1760_urb_done(hcd, urb);
if (qtd)
pe(hcd, qh, qtd);
@@ -777,7 +777,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
if (t1 != t2)
xhci_writel(xhci, t2, port_array[port_index]);

if (DEV_HIGHSPEED(t1)) {
if (hcd->speed != HCD_USB3) {
/* enable remote wake up for USB 2.0 */
u32 __iomem *addr;
u32 tmp;

@@ -866,6 +866,21 @@ int xhci_bus_resume(struct usb_hcd *hcd)
temp |= PORT_LINK_STROBE | XDEV_U0;
xhci_writel(xhci, temp, port_array[port_index]);
}
/* wait for the port to enter U0 and report port link
 * state change.
 */
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(20);
spin_lock_irqsave(&xhci->lock, flags);

/* Clear PLC */
temp = xhci_readl(xhci, port_array[port_index]);
if (temp & PORT_PLC) {
temp = xhci_port_state_to_neutral(temp);
temp |= PORT_PLC;
xhci_writel(xhci, temp, port_array[port_index]);
}

slot_id = xhci_find_slot_id_by_port(hcd,
xhci, port_index + 1);
if (slot_id)

@@ -873,7 +888,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
} else
xhci_writel(xhci, temp, port_array[port_index]);

if (DEV_HIGHSPEED(temp)) {
if (hcd->speed != HCD_USB3) {
/* disable remote wake up for USB 2.0 */
u32 __iomem *addr;
u32 tmp;
@@ -1887,12 +1887,10 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
otg_set_vbus(musb->xceiv, 1);

hcd->self.uses_pio_for_control = 1;

}
if (musb->xceiv->last_event == USB_EVENT_NONE)
pm_runtime_put(musb->controller);

}

return 0;

err2:
@ -270,7 +270,7 @@ static int musb_otg_notifications(struct notifier_block *nb,
|
|||
DBG(4, "VBUS Disconnect\n");
|
||||
|
||||
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
|
||||
if (is_otg_enabled(musb))
|
||||
if (is_otg_enabled(musb) || is_peripheral_enabled(musb))
|
||||
if (musb->gadget_driver)
|
||||
#endif
|
||||
{
|
||||
|
|
|
@@ -775,6 +775,13 @@ get_more_pages:
ci->i_truncate_seq,
ci->i_truncate_size,
&inode->i_mtime, true, 1, 0);

if (!req) {
rc = -ENOMEM;
unlock_page(page);
break;
}

max_pages = req->r_num_pages;

alloc_page_vec(fsc, req);
@@ -1331,10 +1331,11 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci)
}

/*
 * Mark caps dirty. If inode is newly dirty, add to the global dirty
 * list.
 * Mark caps dirty. If inode is newly dirty, return the dirty flags.
 * Caller is then responsible for calling __mark_inode_dirty with the
 * returned flags value.
 */
void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
{
struct ceph_mds_client *mdsc =
ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;

@@ -1357,7 +1358,7 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
spin_unlock(&mdsc->cap_dirty_lock);
if (ci->i_flushing_caps == 0) {
igrab(inode);
ihold(inode);
dirty |= I_DIRTY_SYNC;
}
}

@@ -1365,9 +1366,8 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
(mask & CEPH_CAP_FILE_BUFFER))
dirty |= I_DIRTY_DATASYNC;
if (dirty)
__mark_inode_dirty(inode, dirty);
__cap_delay_requeue(mdsc, ci);
return dirty;
}

/*

@@ -1991,7 +1991,7 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got)
ci->i_wr_ref++;
if (got & CEPH_CAP_FILE_BUFFER) {
if (ci->i_wrbuffer_ref == 0)
igrab(&ci->vfs_inode);
ihold(&ci->vfs_inode);
ci->i_wrbuffer_ref++;
dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
&ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref);
@@ -734,9 +734,12 @@ retry_snap:
}
}
if (ret >= 0) {
int dirty;
spin_lock(&inode->i_lock);
__ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
spin_unlock(&inode->i_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
}

out:
@@ -1567,6 +1567,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
int release = 0, dirtied = 0;
int mask = 0;
int err = 0;
int inode_dirty_flags = 0;

if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;

@@ -1725,13 +1726,16 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
dout("setattr %p ATTR_FILE ... hrm!\n", inode);

if (dirtied) {
__ceph_mark_dirty_caps(ci, dirtied);
inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
inode->i_ctime = CURRENT_TIME;
}

release &= issued;
spin_unlock(&inode->i_lock);

if (inode_dirty_flags)
__mark_inode_dirty(inode, inode_dirty_flags);

if (mask) {
req->r_inode = igrab(inode);
req->r_inode_drop = release;
@@ -506,7 +506,7 @@ static inline int __ceph_caps_dirty(struct ceph_inode_info *ci)
{
return ci->i_dirty_caps | ci->i_flushing_caps;
}
extern void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask);
extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask);

extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask);
extern int __ceph_caps_used(struct ceph_inode_info *ci);
@@ -703,6 +703,7 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
struct ceph_inode_xattr *xattr = NULL;
int issued;
int required_blob_size;
int dirty;

if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;

@@ -763,11 +764,12 @@ retry:
dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
err = __set_xattr(ci, newname, name_len, newval,
val_len, 1, 1, 1, &xattr);
__ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
ci->i_xattrs.dirty = true;
inode->i_ctime = CURRENT_TIME;
spin_unlock(&inode->i_lock);

if (dirty)
__mark_inode_dirty(inode, dirty);
return err;

do_sync:

@@ -810,6 +812,7 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
int issued;
int err;
int dirty;

if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;

@@ -833,12 +836,13 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
goto do_sync;

err = __remove_xattr_by_name(ceph_inode(inode), name);
__ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
ci->i_xattrs.dirty = true;
inode->i_ctime = CURRENT_TIME;

spin_unlock(&inode->i_lock);

if (dirty)
__mark_inode_dirty(inode, dirty);
return err;
do_sync:
spin_unlock(&inode->i_lock);
@@ -480,10 +480,6 @@ static int logfs_read_sb(struct super_block *sb, int read_only)
!read_only)
return -EIO;

mutex_init(&super->s_dirop_mutex);
mutex_init(&super->s_object_alias_mutex);
INIT_LIST_HEAD(&super->s_freeing_list);

ret = logfs_init_rw(sb);
if (ret)
return ret;

@@ -601,6 +597,10 @@ static struct dentry *logfs_mount(struct file_system_type *type, int flags,
if (!super)
return ERR_PTR(-ENOMEM);

mutex_init(&super->s_dirop_mutex);
mutex_init(&super->s_object_alias_mutex);
INIT_LIST_HEAD(&super->s_freeing_list);

if (!devname)
err = logfs_get_sb_bdev(super, type, devname);
else if (strncmp(devname, "mtd", 3))
@@ -155,6 +155,7 @@
 	{0x1002, 0x6719, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x671c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x671d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x671f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAYMAN|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6720, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6721, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6722, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
@@ -167,6 +168,7 @@
 	{0x1002, 0x6729, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6738, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6739, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x673e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BARTS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6740, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6741, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6742, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -199,6 +201,7 @@
 	{0x1002, 0x688D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6898, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x6899, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x689b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x689c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x689d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HEMLOCK|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x689e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYPRESS|RADEON_NEW_MEMMAP}, \
@@ -209,7 +212,9 @@
 	{0x1002, 0x68b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x68b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x68b9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68ba, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x68be, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x68bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_JUNIPER|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x68c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x68c1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x68c7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_REDWOOD|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \

@@ -910,6 +910,7 @@ struct drm_radeon_cs {
 #define RADEON_INFO_CLOCK_CRYSTAL_FREQ 0x09 /* clock crystal frequency */
 #define RADEON_INFO_NUM_BACKENDS 0x0a /* DB/backends for r600+ - need for OQ */
 #define RADEON_INFO_NUM_TILE_PIPES 0x0b /* tile pipes for r600+ */
+#define RADEON_INFO_FUSION_GART_WORKING 0x0c /* fusion writes to GTT were broken before this */
 
 struct drm_radeon_info {
 	uint32_t request;

@@ -61,7 +61,7 @@ struct flex_array {
 struct flex_array *flex_array_alloc(int element_size, unsigned int total,
 		gfp_t flags);
 int flex_array_prealloc(struct flex_array *fa, unsigned int start,
-		unsigned int end, gfp_t flags);
+		unsigned int nr_elements, gfp_t flags);
 void flex_array_free(struct flex_array *fa);
 void flex_array_free_parts(struct flex_array *fa);
 int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,

@@ -81,7 +81,9 @@ struct wm831x_touch_pdata {
 	int rpu;               /** Pen down sensitivity resistor divider */
 	int pressure;          /** Report pressure (boolean) */
 	unsigned int data_irq; /** Touch data ready IRQ */
+	int data_irqf;         /** IRQ flags for data ready IRQ */
 	unsigned int pd_irq;   /** Touch pendown detect IRQ */
+	int pd_irqf;           /** IRQ flags for pen down IRQ */
 };
 
 enum wm831x_watchdog_action {

@@ -183,7 +183,6 @@ struct mmc_host {
 	struct work_struct clk_gate_work; /* delayed clock gate */
 	unsigned int clk_old; /* old clock value cache */
 	spinlock_t clk_lock; /* lock for clk fields */
-	struct mutex clk_gate_mutex; /* mutex for clock gating */
 #endif
 
 	/* host specific block data */

@@ -948,7 +948,7 @@ do { \
 	irqsafe_generic_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2)
 # endif
 # define irqsafe_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
-	__pcpu_double_call_return_int(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
+	__pcpu_double_call_return_bool(irqsafe_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))
 #endif
 
 #endif /* __LINUX_PERCPU_H */

@@ -419,7 +419,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	} else {
 		seq_printf(p, " %8s", "None");
 	}
-#ifdef CONFIG_GENIRC_IRQ_SHOW_LEVEL
+#ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
 	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
 #endif
 	if (desc->name)

@@ -234,7 +234,7 @@ EXPORT_SYMBOL(flex_array_clear);
  * flex_array_prealloc - guarantee that array space exists
  * @fa: the flex array for which to preallocate parts
  * @start: index of first array element for which space is allocated
- * @end: index of last (inclusive) element for which space is allocated
+ * @nr_elements: number of elements for which space is allocated
  * @flags: page allocation flags
  *
  * This will guarantee that no future calls to flex_array_put()
@@ -245,14 +245,24 @@ EXPORT_SYMBOL(flex_array_clear);
  * Locking must be provided by the caller.
  */
 int flex_array_prealloc(struct flex_array *fa, unsigned int start,
-		unsigned int end, gfp_t flags)
+		unsigned int nr_elements, gfp_t flags)
 {
 	int start_part;
 	int end_part;
 	int part_nr;
+	unsigned int end;
 	struct flex_array_part *part;
 
-	if (start >= fa->total_nr_elements || end >= fa->total_nr_elements)
+	if (!start && !nr_elements)
+		return 0;
+	if (start >= fa->total_nr_elements)
 		return -ENOSPC;
+	if (!nr_elements)
+		return 0;
+
+	end = start + nr_elements - 1;
+
+	if (end >= fa->total_nr_elements)
+		return -ENOSPC;
 	if (elements_fit_in_base(fa))
 		return 0;
@@ -343,6 +353,8 @@ int flex_array_shrink(struct flex_array *fa)
 	int part_nr;
 	int ret = 0;
 
+	if (!fa->total_nr_elements)
+		return 0;
 	if (elements_fit_in_base(fa))
 		return ret;
 	for (part_nr = 0; part_nr < FLEX_ARRAY_NR_BASE_PTRS; part_nr++) {

@@ -1359,7 +1359,7 @@ split_fallthrough:
 		 */
 		mark_page_accessed(page);
 	}
-	if (flags & FOLL_MLOCK) {
+	if ((flags & FOLL_MLOCK) && (vma->vm_flags & VM_LOCKED)) {
 		/*
 		 * The preliminary mapping check is mainly to avoid the
 		 * pointless overhead of lock_page on the ZERO_PAGE
@@ -1552,10 +1552,9 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 		}
 
 		/*
-		 * If we don't actually want the page itself,
-		 * and it's the stack guard page, just skip it.
+		 * For mlock, just skip the stack guard page.
 		 */
-		if (!pages && stack_guard_page(vma, start))
+		if ((gup_flags & FOLL_MLOCK) && stack_guard_page(vma, start))
 			goto next_page;
 
 		do {

@@ -162,7 +162,7 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	VM_BUG_ON(end > vma->vm_end);
 	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
 
-	gup_flags = FOLL_TOUCH;
+	gup_flags = FOLL_TOUCH | FOLL_MLOCK;
 	/*
 	 * We want to touch writable mappings with a write fault in order
 	 * to break COW, except for shared mappings because these don't COW
@@ -178,9 +178,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
 	if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
 		gup_flags |= FOLL_FORCE;
 
-	if (vma->vm_flags & VM_LOCKED)
-		gup_flags |= FOLL_MLOCK;
-
 	return __get_user_pages(current, mm, addr, nr_pages, gup_flags,
 			NULL, NULL, nonblocking);
 }

@@ -1940,7 +1940,7 @@ redo:
 	 * Since this is without lock semantics the protection is only against
 	 * code executing on this cpu *not* from access by other cpus.
 	 */
-	if (unlikely(!this_cpu_cmpxchg_double(
+	if (unlikely(!irqsafe_cpu_cmpxchg_double(
 			s->cpu_slab->freelist, s->cpu_slab->tid,
 			object, tid,
 			get_freepointer(s, object), next_tid(tid)))) {
@@ -2145,7 +2145,7 @@ redo:
 		set_freepointer(s, object, c->freelist);
 
 #ifdef CONFIG_CMPXCHG_LOCAL
-	if (unlikely(!this_cpu_cmpxchg_double(
+	if (unlikely(!irqsafe_cpu_cmpxchg_double(
 			s->cpu_slab->freelist, s->cpu_slab->tid,
 			c->freelist, tid,
 			object, next_tid(tid)))) {

@@ -2267,6 +2267,19 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
 	m->more_to_follow = false;
 	m->pool = NULL;
 
+	/* middle */
+	m->middle = NULL;
+
+	/* data */
+	m->nr_pages = 0;
+	m->page_alignment = 0;
+	m->pages = NULL;
+	m->pagelist = NULL;
+	m->bio = NULL;
+	m->bio_iter = NULL;
+	m->bio_seg = 0;
+	m->trail = NULL;
+
 	/* front */
 	if (front_len) {
 		if (front_len > PAGE_CACHE_SIZE) {
@@ -2286,19 +2299,6 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
 	}
 	m->front.iov_len = front_len;
 
-	/* middle */
-	m->middle = NULL;
-
-	/* data */
-	m->nr_pages = 0;
-	m->page_alignment = 0;
-	m->pages = NULL;
-	m->pagelist = NULL;
-	m->bio = NULL;
-	m->bio_iter = NULL;
-	m->bio_seg = 0;
-	m->trail = NULL;
-
 	dout("ceph_msg_new %p front %d\n", m, front_len);
 	return m;
 

@@ -470,8 +470,8 @@ struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
 				snapc, ops,
 				use_mempool,
 				GFP_NOFS, NULL, NULL);
-	if (IS_ERR(req))
-		return req;
+	if (!req)
+		return NULL;
 
 	/* calculate max write size */
 	calc_layout(osdc, vino, layout, off, plen, req, ops);

@@ -1578,7 +1578,8 @@ static int may_create(struct inode *dir,
 		return rc;
 
 	if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
-		rc = security_transition_sid(sid, dsec->sid, tclass, NULL, &newsid);
+		rc = security_transition_sid(sid, dsec->sid, tclass,
+					&dentry->d_name, &newsid);
 		if (rc)
 			return rc;
 	}

@@ -502,7 +502,7 @@ static int policydb_index(struct policydb *p)
 		goto out;
 
 	rc = flex_array_prealloc(p->type_val_to_struct_array, 0,
-			p->p_types.nprim - 1, GFP_KERNEL | __GFP_ZERO);
+			p->p_types.nprim, GFP_KERNEL | __GFP_ZERO);
 	if (rc)
 		goto out;
 
@@ -519,7 +519,7 @@ static int policydb_index(struct policydb *p)
 			goto out;
 
 		rc = flex_array_prealloc(p->sym_val_to_name[i],
-				0, p->symtab[i].nprim - 1,
+				0, p->symtab[i].nprim,
 				GFP_KERNEL | __GFP_ZERO);
 		if (rc)
 			goto out;
@@ -2375,7 +2375,7 @@ int policydb_read(struct policydb *p, void *fp)
 		goto bad;
 
 	/* preallocate so we don't have to worry about the put ever failing */
-	rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim - 1,
+	rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim,
 			GFP_KERNEL | __GFP_ZERO);
 	if (rc)
 		goto bad;