Merge branch 'paul' (Fixups from Paul Gortmaker)

This merges some of the fixes from Paul Gortmaker for the header file
cleanup fallout.

Some of the patches are going through arch maintainer trees, and David
Howells suggested another be done differently, but this at least fixes a
few cases.

* emailed from Paul Gortmaker <paul.gortmaker@windriver.com>:
  asm-generic: add linux/types.h to cmpxchg.h
  firewire: restore the device.h include in linux/firewire.h
  frv: fix warnings in mb93090-mb00/pci-dma.c about implicit EXPORT_SYMBOL
  parisc: fix missing cmpxchg file error from system.h split
  blackfin: fix cmpxchg build fails from system.h fallout
  avr32: fix build failures from mis-naming of atmel_nand.h
  ARM: mach-msm: fix compile fail from system.h fallout
  irq_work: fix compile failure on MIPS from system.h split
Linus Torvalds committed 2012-04-02 14:41:43 -07:00
Parents: b1a808ff43 80da6a4fee
Commit: 95694129b4
10 changed files with 128 additions and 113 deletions
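
The common thread in the fixes merged here: code used a symbol whose header had previously been pulled in only indirectly (often through asm/system.h or another unrelated include), and the header cleanup exposed the missing explicit include. A minimal, hypothetical sketch of that pattern; the file and function names below are invented for illustration and are not taken from these patches:

/* drivers/example/widget.c - hypothetical file, for illustration only */
#include <linux/kernel.h>
#include <linux/export.h>	/* for EXPORT_SYMBOL(); before the cleanup this
				 * often arrived only via another header's
				 * include chain, so the build broke once that
				 * chain was trimmed */

int widget_reset(void)
{
	return 0;
}
EXPORT_SYMBOL(widget_reset);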

View file

@@ -16,6 +16,7 @@
#ifndef __ASM_ARCH_MSM_UNCOMPRESS_H
#define __ASM_ARCH_MSM_UNCOMPRESS_H
#include <asm/barrier.h>
#include <asm/processor.h>
#include <mach/msm_iomap.h>

View file

@@ -7,7 +7,7 @@
#include <linux/types.h>
#include <linux/serial.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_data/atmel_nand.h>
#include <linux/platform_data/atmel.h>
#define GPIO_PIN_NONE (-1)

View file

@@ -122,7 +122,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
(unsigned long)(n), sizeof(*(ptr))))
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#include <asm-generic/cmpxchg.h>
#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#endif /* !CONFIG_SMP */

View file

@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

View file

@@ -6,6 +6,7 @@
#define _ASM_PARISC_ATOMIC_H_
#include <linux/types.h>
#include <asm/cmpxchg.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -48,112 +49,6 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
#endif
/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);
/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif
/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long
__xchg(unsigned long x, __volatile__ void * ptr, int size)
{
switch(size) {
#ifdef CONFIG_64BIT
case 8: return __xchg64(x,(unsigned long *) ptr);
#endif
case 4: return __xchg32((int) x, (int *) ptr);
case 1: return __xchg8((char) x, (char *) ptr);
}
__xchg_called_with_bad_pointer();
return x;
}
/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
** if (__builtin_constant_p(x) && (x == NULL))
** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p);
*/
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
#define __HAVE_ARCH_CMPXCHG 1
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);
/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
switch(size) {
#ifdef CONFIG_64BIT
case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
}
__cmpxchg_called_with_bad_pointer();
return old;
}
#define cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
#include <asm-generic/cmpxchg-local.h>
static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long old,
unsigned long new_, int size)
{
switch (size) {
#ifdef CONFIG_64BIT
case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
case 4: return __cmpxchg_u32(ptr, old, new_);
default:
return __cmpxchg_local_generic(ptr, old, new_, size);
}
}
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
/*
* Note that we need not lock read accesses - aligned word writes/reads
* are atomic, so a reader never sees inconsistent values.

View file

@@ -0,0 +1,116 @@
/*
* forked from parisc asm/atomic.h which was:
* Copyright (C) 2000 Philipp Rumpf <prumpf@tux.org>
* Copyright (C) 2006 Kyle McMartin <kyle@parisc-linux.org>
*/
#ifndef _ASM_PARISC_CMPXCHG_H_
#define _ASM_PARISC_CMPXCHG_H_
/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);
/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef CONFIG_64BIT
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif
/* optimizer better get rid of switch since size is a constant */
static inline unsigned long
__xchg(unsigned long x, __volatile__ void *ptr, int size)
{
switch (size) {
#ifdef CONFIG_64BIT
case 8: return __xchg64(x, (unsigned long *) ptr);
#endif
case 4: return __xchg32((int) x, (int *) ptr);
case 1: return __xchg8((char) x, (char *) ptr);
}
__xchg_called_with_bad_pointer();
return x;
}
/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could CONFIG_64BIT code use LDCD too?
**
** if (__builtin_constant_p(x) && (x == NULL))
** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p);
*/
#define xchg(ptr, x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
#define __HAVE_ARCH_CMPXCHG 1
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr,
unsigned long old, unsigned long new_);
/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
switch (size) {
#ifdef CONFIG_64BIT
case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr,
(unsigned int)old, (unsigned int)new_);
}
__cmpxchg_called_with_bad_pointer();
return old;
}
#define cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
#include <asm-generic/cmpxchg-local.h>
static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long old,
unsigned long new_, int size)
{
switch (size) {
#ifdef CONFIG_64BIT
case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
case 4: return __cmpxchg_u32(ptr, old, new_);
default:
return __cmpxchg_local_generic(ptr, old, new_, size);
}
}
/*
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available.
*/
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
})
#else
#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
#endif
#endif /* _ASM_PARISC_CMPXCHG_H_ */
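
The new file above only relocates code that atomic.h used to carry; nothing changes functionally. For reference, a small sketch of how the xchg()/cmpxchg() macros it defines are typically used; the helpers below are hypothetical and not part of this patch set:

#include <linux/atomic.h>	/* on parisc this now reaches the definitions
				 * above through asm/atomic.h -> asm/cmpxchg.h */

/* Lock-free increment: retry until no other CPU changed the word between
 * our read and our compare-and-exchange. */
static void bump(unsigned long *ctr)
{
	unsigned long old;

	do {
		old = *ctr;
	} while (cmpxchg(ctr, old, old + 1) != old);
}

/* Atomically install a new value and return whatever was there before. */
static unsigned long swap_in(unsigned long *p, unsigned long val)
{
	return xchg(p, val);
}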

View file

@@ -10,6 +10,7 @@
#error "Cannot use generic cmpxchg on SMP"
#endif
#include <linux/types.h>
#include <linux/irqflags.h>
#ifndef xchg

View file

@@ -2,6 +2,7 @@
#define _LINUX_FIREWIRE_H
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/kref.h>
@@ -64,8 +65,6 @@
#define CSR_MODEL 0x17
#define CSR_DIRECTORY_ID 0x20
struct device;
struct fw_csr_iterator {
const u32 *p;
const u32 *end;

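The firewire change swaps a bare "struct device;" forward declaration for the real <linux/device.h> include. The general rule, illustrated with invented types below: a forward declaration is enough to store or pass a pointer to the type, but embedding the struct or touching its members requires the complete definition that only the header provides.

struct device;			/* forward declaration: size and members unknown */

struct uses_pointer {
	struct device *dev;	/* fine: only a pointer is stored */
};

/*
 * struct uses_embedded {
 *	struct device dev;	<- will not compile with only the forward
 * };				   declaration; needs <linux/device.h>
 */
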
View file

@@ -4,8 +4,8 @@
* GPL v2 Only
*/
#ifndef __ATMEL_NAND_H__
#define __ATMEL_NAND_H__
#ifndef __ATMEL_H__
#define __ATMEL_H__
#include <linux/mtd/nand.h>
@@ -24,4 +24,4 @@ struct atmel_nand_data {
unsigned int num_parts;
};
#endif /* __ATMEL_NAND_H__ */
#endif /* __ATMEL_H__ */

View file

@@ -5,6 +5,7 @@
* context. The enqueueing is NMI-safe.
*/
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>