Merge branch 'gpio-for-grant' of git://sources.calxeda.com/kernel/linux into gpio/next

Conflicts:
	drivers/gpio/gpio-pl061.c
Grant Likely 2012-01-05 11:05:51 -07:00
Parents: 1a0703ede4 2de0dbc5f6
Commit: fda87903f4
353 changed files with 5013 additions and 2506 deletions

View file

@ -57,13 +57,6 @@ create_snap
$ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_create
rollback_snap
Rolls back data to the specified snapshot. This goes over the entire
list of rados blocks and sends a rollback command to each.
$ echo <snap-name> > /sys/bus/rbd/devices/<dev-id>/snap_rollback
snap_*
A directory per each snapshot
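A minimal userspace sketch of the snap_create usage documented above; the device id 0 and the snapshot name "mysnap" are placeholders for illustration, not values taken from this commit:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* equivalent of: echo mysnap > /sys/bus/rbd/devices/0/snap_create */
	const char *attr = "/sys/bus/rbd/devices/0/snap_create";	/* hypothetical dev-id */
	const char *name = "mysnap";					/* hypothetical snapshot */
	int fd = open(attr, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, name, strlen(name)) < 0)
		perror("write");
	close(fd);
	return 0;
}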

View file

@ -3102,6 +3102,7 @@ F: include/linux/hid*
HIGH-RESOLUTION TIMERS, CLOCKEVENTS, DYNTICKS
M: Thomas Gleixner <tglx@linutronix.de>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Maintained
F: Documentation/timers/
F: kernel/hrtimer.c
@ -3611,7 +3612,7 @@ F: net/irda/
IRQ SUBSYSTEM
M: Thomas Gleixner <tglx@linutronix.de>
S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git irq/core
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
F: kernel/irq/
ISAPNP
@ -4099,7 +4100,7 @@ F: drivers/hwmon/lm90.c
LOCKDEP AND LOCKSTAT
M: Peter Zijlstra <peterz@infradead.org>
M: Ingo Molnar <mingo@redhat.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/locking
S: Maintained
F: Documentation/lockdep*.txt
F: Documentation/lockstat.txt
@ -4281,7 +4282,9 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
S: Maintained
F: Documentation/dvb/
F: Documentation/video4linux/
F: Documentation/DocBook/media/
F: drivers/media/
F: drivers/staging/media/
F: include/media/
F: include/linux/dvb/
F: include/linux/videodev*.h
@ -5087,6 +5090,7 @@ M: Peter Zijlstra <a.p.zijlstra@chello.nl>
M: Paul Mackerras <paulus@samba.org>
M: Ingo Molnar <mingo@elte.hu>
M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Supported
F: kernel/events/*
F: include/linux/perf_event.h
@ -5166,6 +5170,7 @@ F: drivers/scsi/pm8001/
POSIX CLOCKS and TIMERS
M: Thomas Gleixner <tglx@linutronix.de>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
F: fs/timerfd.c
F: include/linux/timer*
@ -5681,6 +5686,7 @@ F: drivers/dma/dw_dmac.c
TIMEKEEPING, NTP
M: John Stultz <johnstul@us.ibm.com>
M: Thomas Gleixner <tglx@linutronix.de>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git timers/core
S: Supported
F: include/linux/clocksource.h
F: include/linux/time.h
@ -5705,6 +5711,7 @@ F: drivers/watchdog/sc1200wdt.c
SCHEDULER
M: Ingo Molnar <mingo@elte.hu>
M: Peter Zijlstra <peterz@infradead.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched/core
S: Maintained
F: kernel/sched*
F: include/linux/sched.h
@ -6632,7 +6639,7 @@ TRACING
M: Steven Rostedt <rostedt@goodmis.org>
M: Frederic Weisbecker <fweisbec@gmail.com>
M: Ingo Molnar <mingo@redhat.com>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip.git perf/core
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
S: Maintained
F: Documentation/trace/ftrace.txt
F: arch/*/*/*/ftrace.h
@ -7382,7 +7389,7 @@ M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@redhat.com>
M: "H. Peter Anvin" <hpa@zytor.com>
M: x86@kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/x86/linux-2.6-x86.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
S: Maintained
F: Documentation/x86/
F: arch/x86/

View file

@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 2
SUBLEVEL = 0
EXTRAVERSION = -rc5
EXTRAVERSION = -rc7
NAME = Saber-toothed Squirrel
# *DOCUMENTATION*

View file

@ -220,8 +220,9 @@ config NEED_MACH_MEMORY_H
be avoided when possible.
config PHYS_OFFSET
hex "Physical address of main memory"
hex "Physical address of main memory" if MMU
depends on !ARM_PATCH_PHYS_VIRT && !NEED_MACH_MEMORY_H
default DRAM_BASE if !MMU
help
Please provide the physical address corresponding to the
location of main memory in your system.

View file

@ -30,14 +30,15 @@ enum unwind_reason_code {
};
struct unwind_idx {
unsigned long addr;
unsigned long addr_offset;
unsigned long insn;
};
struct unwind_table {
struct list_head list;
struct unwind_idx *start;
struct unwind_idx *stop;
const struct unwind_idx *start;
const struct unwind_idx *origin;
const struct unwind_idx *stop;
unsigned long begin_addr;
unsigned long end_addr;
};
@ -49,15 +50,6 @@ extern struct unwind_table *unwind_table_add(unsigned long start,
extern void unwind_table_del(struct unwind_table *tab);
extern void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk);
#ifdef CONFIG_ARM_UNWIND
extern int __init unwind_init(void);
#else
static inline int __init unwind_init(void)
{
return 0;
}
#endif
#endif /* !__ASSEMBLY__ */
#ifdef CONFIG_ARM_UNWIND

View file

@ -640,6 +640,9 @@ static struct platform_device_id armpmu_plat_device_ids[] = {
static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
if (!cpu_pmu)
return -ENODEV;
cpu_pmu->plat_device = pdev;
return 0;
}

View file

@ -895,8 +895,6 @@ void __init setup_arch(char **cmdline_p)
{
struct machine_desc *mdesc;
unwind_init();
setup_processor();
mdesc = setup_machine_fdt(__atags_pointer);
if (!mdesc)
@ -904,6 +902,12 @@ void __init setup_arch(char **cmdline_p)
machine_desc = mdesc;
machine_name = mdesc->name;
#ifdef CONFIG_ZONE_DMA
if (mdesc->dma_zone_size) {
extern unsigned long arm_dma_zone_size;
arm_dma_zone_size = mdesc->dma_zone_size;
}
#endif
if (mdesc->soft_reboot)
reboot_setup("s");
@ -934,12 +938,6 @@ void __init setup_arch(char **cmdline_p)
tcm_init();
#ifdef CONFIG_ZONE_DMA
if (mdesc->dma_zone_size) {
extern unsigned long arm_dma_zone_size;
arm_dma_zone_size = mdesc->dma_zone_size;
}
#endif
#ifdef CONFIG_MULTI_IRQ_HANDLER
handle_arch_irq = mdesc->handle_irq;
#endif

View file

@ -67,7 +67,7 @@ EXPORT_SYMBOL(__aeabi_unwind_cpp_pr2);
struct unwind_ctrl_block {
unsigned long vrs[16]; /* virtual register set */
unsigned long *insn; /* pointer to the current instructions word */
const unsigned long *insn; /* pointer to the current instructions word */
int entries; /* number of entries left to interpret */
int byte; /* current byte number in the instructions word */
};
@ -83,8 +83,9 @@ enum regs {
PC = 15
};
extern struct unwind_idx __start_unwind_idx[];
extern struct unwind_idx __stop_unwind_idx[];
extern const struct unwind_idx __start_unwind_idx[];
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];
static DEFINE_SPINLOCK(unwind_lock);
static LIST_HEAD(unwind_tables);
@ -98,45 +99,99 @@ static LIST_HEAD(unwind_tables);
})
/*
* Binary search in the unwind index. The entries entries are
* Binary search in the unwind index. The entries are
* guaranteed to be sorted in ascending order by the linker.
*
* start = first entry
* origin = first entry with positive offset (or stop if there is no such entry)
* stop - 1 = last entry
*/
static struct unwind_idx *search_index(unsigned long addr,
struct unwind_idx *first,
struct unwind_idx *last)
static const struct unwind_idx *search_index(unsigned long addr,
const struct unwind_idx *start,
const struct unwind_idx *origin,
const struct unwind_idx *stop)
{
pr_debug("%s(%08lx, %p, %p)\n", __func__, addr, first, last);
unsigned long addr_prel31;
if (addr < first->addr) {
pr_warning("unwind: Unknown symbol address %08lx\n", addr);
return NULL;
} else if (addr >= last->addr)
return last;
pr_debug("%s(%08lx, %p, %p, %p)\n",
__func__, addr, start, origin, stop);
while (first < last - 1) {
struct unwind_idx *mid = first + ((last - first + 1) >> 1);
/*
* only search in the section with the matching sign. This way the
* prel31 numbers can be compared as unsigned longs.
*/
if (addr < (unsigned long)start)
/* negative offsets: [start; origin) */
stop = origin;
else
/* positive offsets: [origin; stop) */
start = origin;
if (addr < mid->addr)
last = mid;
else
first = mid;
/* prel31 for address relative to start */
addr_prel31 = (addr - (unsigned long)start) & 0x7fffffff;
while (start < stop - 1) {
const struct unwind_idx *mid = start + ((stop - start) >> 1);
/*
* As addr_prel31 is relative to start an offset is needed to
* make it relative to mid.
*/
if (addr_prel31 - ((unsigned long)mid - (unsigned long)start) <
mid->addr_offset)
stop = mid;
else {
/* keep addr_prel31 relative to start */
addr_prel31 -= ((unsigned long)mid -
(unsigned long)start);
start = mid;
}
}
return first;
if (likely(start->addr_offset <= addr_prel31))
return start;
else {
pr_warning("unwind: Unknown symbol address %08lx\n", addr);
return NULL;
}
}
static struct unwind_idx *unwind_find_idx(unsigned long addr)
static const struct unwind_idx *unwind_find_origin(
const struct unwind_idx *start, const struct unwind_idx *stop)
{
struct unwind_idx *idx = NULL;
pr_debug("%s(%p, %p)\n", __func__, start, stop);
while (start < stop) {
const struct unwind_idx *mid = start + ((stop - start) >> 1);
if (mid->addr_offset >= 0x40000000)
/* negative offset */
start = mid + 1;
else
/* positive offset */
stop = mid;
}
pr_debug("%s -> %p\n", __func__, stop);
return stop;
}
static const struct unwind_idx *unwind_find_idx(unsigned long addr)
{
const struct unwind_idx *idx = NULL;
unsigned long flags;
pr_debug("%s(%08lx)\n", __func__, addr);
if (core_kernel_text(addr))
if (core_kernel_text(addr)) {
if (unlikely(!__origin_unwind_idx))
__origin_unwind_idx =
unwind_find_origin(__start_unwind_idx,
__stop_unwind_idx);
/* main unwind table */
idx = search_index(addr, __start_unwind_idx,
__stop_unwind_idx - 1);
else {
__origin_unwind_idx,
__stop_unwind_idx);
} else {
/* module unwind tables */
struct unwind_table *table;
@ -145,7 +200,8 @@ static struct unwind_idx *unwind_find_idx(unsigned long addr)
if (addr >= table->begin_addr &&
addr < table->end_addr) {
idx = search_index(addr, table->start,
table->stop - 1);
table->origin,
table->stop);
/* Move-to-front to exploit common traces */
list_move(&table->list, &unwind_tables);
break;
@ -274,7 +330,7 @@ static int unwind_exec_insn(struct unwind_ctrl_block *ctrl)
int unwind_frame(struct stackframe *frame)
{
unsigned long high, low;
struct unwind_idx *idx;
const struct unwind_idx *idx;
struct unwind_ctrl_block ctrl;
/* only go to a higher address on the stack */
@ -399,7 +455,6 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
unsigned long text_size)
{
unsigned long flags;
struct unwind_idx *idx;
struct unwind_table *tab = kmalloc(sizeof(*tab), GFP_KERNEL);
pr_debug("%s(%08lx, %08lx, %08lx, %08lx)\n", __func__, start, size,
@ -408,15 +463,12 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size,
if (!tab)
return tab;
tab->start = (struct unwind_idx *)start;
tab->stop = (struct unwind_idx *)(start + size);
tab->start = (const struct unwind_idx *)start;
tab->stop = (const struct unwind_idx *)(start + size);
tab->origin = unwind_find_origin(tab->start, tab->stop);
tab->begin_addr = text_addr;
tab->end_addr = text_addr + text_size;
/* Convert the symbol addresses to absolute values */
for (idx = tab->start; idx < tab->stop; idx++)
idx->addr = prel31_to_addr(&idx->addr);
spin_lock_irqsave(&unwind_lock, flags);
list_add_tail(&tab->list, &unwind_tables);
spin_unlock_irqrestore(&unwind_lock, flags);
@ -437,16 +489,3 @@ void unwind_table_del(struct unwind_table *tab)
kfree(tab);
}
int __init unwind_init(void)
{
struct unwind_idx *idx;
/* Convert the symbol addresses to absolute values */
for (idx = __start_unwind_idx; idx < __stop_unwind_idx; idx++)
idx->addr = prel31_to_addr(&idx->addr);
pr_debug("unwind: ARM stack unwinding initialised\n");
return 0;
}
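For reference, the prel31 fields compared above encode a 31-bit signed offset relative to the field's own address, which is why this patch can binary-search the index without first converting every entry to an absolute value. A standalone sketch of that decoding, assuming the usual sign-extension semantics (illustrative only, not the kernel macro):

#include <stdio.h>
#include <stdint.h>

/* Decode a prel31 field: sign-extend bit 30, then add the field's own address. */
static uintptr_t prel31_to_addr(const uint32_t *field)
{
	int32_t offset = ((int32_t)(*field << 1)) >> 1;	/* sign-extend 31 -> 32 bits */
	return (uintptr_t)field + offset;
}

int main(void)
{
	/* Two made-up entries: +16 bytes and -16 bytes relative to themselves. */
	uint32_t table[2] = { 0x00000010, 0x7ffffff0 };
	int i;

	for (i = 0; i < 2; i++)
		printf("entry %d -> %p\n", i, (void *)prel31_to_addr(&table[i]));
	return 0;
}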

View file

@ -44,8 +44,6 @@ struct mct_clock_event_device {
char name[10];
};
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
static void exynos4_mct_write(unsigned int value, void *addr)
{
void __iomem *stat_addr;
@ -264,6 +262,9 @@ static void exynos4_clockevent_init(void)
}
#ifdef CONFIG_LOCAL_TIMERS
static DEFINE_PER_CPU(struct mct_clock_event_device, percpu_mct_tick);
/* Clock event handling */
static void exynos4_mct_tick_stop(struct mct_clock_event_device *mevt)
{
@ -428,9 +429,13 @@ int __cpuinit local_timer_setup(struct clock_event_device *evt)
void local_timer_stop(struct clock_event_device *evt)
{
unsigned int cpu = smp_processor_id();
evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
if (mct_int_type == MCT_INT_SPI)
disable_irq(evt->irq);
if (cpu == 0)
remove_irq(evt->irq, &mct_tick0_event_irq);
else
remove_irq(evt->irq, &mct_tick1_event_irq);
else
disable_percpu_irq(IRQ_MCT_LOCALTIMER);
}
@ -443,6 +448,7 @@ static void __init exynos4_timer_resources(void)
clk_rate = clk_get_rate(mct_clk);
#ifdef CONFIG_LOCAL_TIMERS
if (mct_int_type == MCT_INT_PPI) {
int err;
@ -452,6 +458,7 @@ static void __init exynos4_timer_resources(void)
WARN(err, "MCT: can't request IRQ %d (%d)\n",
IRQ_MCT_LOCALTIMER, err);
}
#endif /* CONFIG_LOCAL_TIMERS */
}
static void __init exynos4_timer_init(void)

View file

@ -362,7 +362,7 @@ static void __init mx51_babbage_init(void)
{
iomux_v3_cfg_t usbh1stp = MX51_PAD_USBH1_STP__USBH1_STP;
iomux_v3_cfg_t power_key = NEW_PAD_CTRL(MX51_PAD_EIM_A27__GPIO2_21,
PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH | PAD_CTL_PUS_100K_UP);
PAD_CTL_SRE_FAST | PAD_CTL_DSE_HIGH);
imx51_soc_init();

View file

@ -106,7 +106,7 @@ static inline void mx53_evk_fec_reset(void)
gpio_set_value(MX53_EVK_FEC_PHY_RST, 1);
}
static struct fec_platform_data mx53_evk_fec_pdata = {
static const struct fec_platform_data mx53_evk_fec_pdata __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};

View file

@ -242,7 +242,7 @@ static inline void mx53_loco_fec_reset(void)
gpio_set_value(LOCO_FEC_PHY_RST, 1);
}
static struct fec_platform_data mx53_loco_fec_data = {
static const struct fec_platform_data mx53_loco_fec_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};

View file

@ -104,7 +104,7 @@ static inline void mx53_smd_fec_reset(void)
gpio_set_value(SMD_FEC_PHY_RST, 1);
}
static struct fec_platform_data mx53_smd_fec_data = {
static const struct fec_platform_data mx53_smd_fec_data __initconst = {
.phy = PHY_INTERFACE_MODE_RMII,
};

View file

@ -193,7 +193,7 @@ static struct platform_device rx51_charger_device = {
static void __init rx51_charger_init(void)
{
WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO,
GPIOF_OUT_INIT_LOW, "isp1704_reset"));
GPIOF_OUT_INIT_HIGH, "isp1704_reset"));
platform_device_register(&rx51_charger_device);
}

View file

@ -145,6 +145,9 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
pdata->reg_size = 4;
pdata->has_ccr = true;
}
pdata->set_clk_src = omap2_mcbsp_set_clk_src;
if (id == 1)
pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
if (oh->class->rev == MCBSP_CONFIG_TYPE3) {
if (id == 2)
@ -174,9 +177,6 @@ static int omap_init_mcbsp(struct omap_hwmod *oh, void *unused)
name, oh->name);
return PTR_ERR(pdev);
}
pdata->set_clk_src = omap2_mcbsp_set_clk_src;
if (id == 1)
pdata->mux_signal = omap2_mcbsp1_mux_rx_clk;
omap_mcbsp_count++;
return 0;
}

View file

@ -117,17 +117,14 @@ static void __init realview_eb_map_io(void)
static struct pl061_platform_data gpio0_plat_data = {
.gpio_base = 0,
.irq_base = -1,
};
static struct pl061_platform_data gpio1_plat_data = {
.gpio_base = 8,
.irq_base = -1,
};
static struct pl061_platform_data gpio2_plat_data = {
.gpio_base = 16,
.irq_base = -1,
};
static struct pl022_ssp_controller ssp0_plat_data = {

View file

@ -113,17 +113,14 @@ static void __init realview_pb1176_map_io(void)
static struct pl061_platform_data gpio0_plat_data = {
.gpio_base = 0,
.irq_base = -1,
};
static struct pl061_platform_data gpio1_plat_data = {
.gpio_base = 8,
.irq_base = -1,
};
static struct pl061_platform_data gpio2_plat_data = {
.gpio_base = 16,
.irq_base = -1,
};
static struct pl022_ssp_controller ssp0_plat_data = {

View file

@ -112,17 +112,14 @@ static void __init realview_pb11mp_map_io(void)
static struct pl061_platform_data gpio0_plat_data = {
.gpio_base = 0,
.irq_base = -1,
};
static struct pl061_platform_data gpio1_plat_data = {
.gpio_base = 8,
.irq_base = -1,
};
static struct pl061_platform_data gpio2_plat_data = {
.gpio_base = 16,
.irq_base = -1,
};
static struct pl022_ssp_controller ssp0_plat_data = {

View file

@ -102,17 +102,14 @@ static void __init realview_pba8_map_io(void)
static struct pl061_platform_data gpio0_plat_data = {
.gpio_base = 0,
.irq_base = -1,
};
static struct pl061_platform_data gpio1_plat_data = {
.gpio_base = 8,
.irq_base = -1,
};
static struct pl061_platform_data gpio2_plat_data = {
.gpio_base = 16,
.irq_base = -1,
};
static struct pl022_ssp_controller ssp0_plat_data = {

View file

@ -124,17 +124,14 @@ static void __init realview_pbx_map_io(void)
static struct pl061_platform_data gpio0_plat_data = {
.gpio_base = 0,
.irq_base = -1,
};
static struct pl061_platform_data gpio1_plat_data = {
.gpio_base = 8,
.irq_base = -1,
};
static struct pl061_platform_data gpio2_plat_data = {
.gpio_base = 16,
.irq_base = -1,
};
static struct pl022_ssp_controller ssp0_plat_data = {

View file

@ -273,6 +273,7 @@ static struct samsung_bl_gpio_info smdkv210_bl_gpio_info = {
static struct platform_pwm_backlight_data smdkv210_bl_data = {
.pwm_id = 3,
.pwm_period_ns = 1000,
};
static void __init smdkv210_map_io(void)

View file

@ -607,6 +607,7 @@ struct sys_timer ag5evm_timer = {
MACHINE_START(AG5EVM, "ag5evm")
.map_io = ag5evm_map_io,
.nr_irqs = NR_IRQS_LEGACY,
.init_irq = sh73a0_init_irq,
.handle_irq = shmobile_handle_irq_gic,
.init_machine = ag5evm_init,

View file

@ -33,6 +33,7 @@
#include <linux/input/sh_keysc.h>
#include <linux/gpio_keys.h>
#include <linux/leds.h>
#include <linux/platform_data/leds-renesas-tpu.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/mfd/tmio.h>
@ -56,7 +57,7 @@ static struct resource smsc9220_resources[] = {
.flags = IORESOURCE_MEM,
},
[1] = {
.start = gic_spi(33), /* PINTA2 @ PORT144 */
.start = SH73A0_PINT0_IRQ(2), /* PINTA2 */
.flags = IORESOURCE_IRQ,
},
};
@ -157,10 +158,6 @@ static struct platform_device gpio_keys_device = {
#define GPIO_LED(n, g) { .name = n, .gpio = g }
static struct gpio_led gpio_leds[] = {
GPIO_LED("V2513", GPIO_PORT153), /* PORT153 [TPU1T02] -> V2513 */
GPIO_LED("V2514", GPIO_PORT199), /* PORT199 [TPU4TO1] -> V2514 */
GPIO_LED("V2515", GPIO_PORT197), /* PORT197 [TPU2TO1] -> V2515 */
GPIO_LED("KEYLED", GPIO_PORT163), /* PORT163 [TPU3TO0] -> KEYLED */
GPIO_LED("G", GPIO_PORT20), /* PORT20 [GPO0] -> LED7 -> "G" */
GPIO_LED("H", GPIO_PORT21), /* PORT21 [GPO1] -> LED8 -> "H" */
GPIO_LED("J", GPIO_PORT22), /* PORT22 [GPO2] -> LED9 -> "J" */
@ -179,6 +176,119 @@ static struct platform_device gpio_leds_device = {
},
};
/* TPU LED */
static struct led_renesas_tpu_config led_renesas_tpu12_pdata = {
.name = "V2513",
.pin_gpio_fn = GPIO_FN_TPU1TO2,
.pin_gpio = GPIO_PORT153,
.channel_offset = 0x90,
.timer_bit = 2,
.max_brightness = 1000,
};
static struct resource tpu12_resources[] = {
[0] = {
.name = "TPU12",
.start = 0xe6610090,
.end = 0xe66100b5,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device leds_tpu12_device = {
.name = "leds-renesas-tpu",
.id = 12,
.dev = {
.platform_data = &led_renesas_tpu12_pdata,
},
.num_resources = ARRAY_SIZE(tpu12_resources),
.resource = tpu12_resources,
};
static struct led_renesas_tpu_config led_renesas_tpu41_pdata = {
.name = "V2514",
.pin_gpio_fn = GPIO_FN_TPU4TO1,
.pin_gpio = GPIO_PORT199,
.channel_offset = 0x50,
.timer_bit = 1,
.max_brightness = 1000,
};
static struct resource tpu41_resources[] = {
[0] = {
.name = "TPU41",
.start = 0xe6640050,
.end = 0xe6640075,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device leds_tpu41_device = {
.name = "leds-renesas-tpu",
.id = 41,
.dev = {
.platform_data = &led_renesas_tpu41_pdata,
},
.num_resources = ARRAY_SIZE(tpu41_resources),
.resource = tpu41_resources,
};
static struct led_renesas_tpu_config led_renesas_tpu21_pdata = {
.name = "V2515",
.pin_gpio_fn = GPIO_FN_TPU2TO1,
.pin_gpio = GPIO_PORT197,
.channel_offset = 0x50,
.timer_bit = 1,
.max_brightness = 1000,
};
static struct resource tpu21_resources[] = {
[0] = {
.name = "TPU21",
.start = 0xe6620050,
.end = 0xe6620075,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device leds_tpu21_device = {
.name = "leds-renesas-tpu",
.id = 21,
.dev = {
.platform_data = &led_renesas_tpu21_pdata,
},
.num_resources = ARRAY_SIZE(tpu21_resources),
.resource = tpu21_resources,
};
static struct led_renesas_tpu_config led_renesas_tpu30_pdata = {
.name = "KEYLED",
.pin_gpio_fn = GPIO_FN_TPU3TO0,
.pin_gpio = GPIO_PORT163,
.channel_offset = 0x10,
.timer_bit = 0,
.max_brightness = 1000,
};
static struct resource tpu30_resources[] = {
[0] = {
.name = "TPU30",
.start = 0xe6630010,
.end = 0xe6630035,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device leds_tpu30_device = {
.name = "leds-renesas-tpu",
.id = 30,
.dev = {
.platform_data = &led_renesas_tpu30_pdata,
},
.num_resources = ARRAY_SIZE(tpu30_resources),
.resource = tpu30_resources,
};
/* MMCIF */
static struct resource mmcif_resources[] = {
[0] = {
@ -291,6 +401,10 @@ static struct platform_device *kota2_devices[] __initdata = {
&keysc_device,
&gpio_keys_device,
&gpio_leds_device,
&leds_tpu12_device,
&leds_tpu41_device,
&leds_tpu21_device,
&leds_tpu30_device,
&mmcif_device,
&sdhi0_device,
&sdhi1_device,
@ -317,18 +431,6 @@ static void __init kota2_map_io(void)
shmobile_setup_console();
}
#define PINTER0A 0xe69000a0
#define PINTCR0A 0xe69000b0
void __init kota2_init_irq(void)
{
sh73a0_init_irq();
/* setup PINT: enable PINTA2 as active low */
__raw_writel(1 << 29, PINTER0A);
__raw_writew(2 << 10, PINTCR0A);
}
static void __init kota2_init(void)
{
sh73a0_pinmux_init();
@ -447,7 +549,8 @@ struct sys_timer kota2_timer = {
MACHINE_START(KOTA2, "kota2")
.map_io = kota2_map_io,
.init_irq = kota2_init_irq,
.nr_irqs = NR_IRQS_LEGACY,
.init_irq = sh73a0_init_irq,
.handle_irq = shmobile_handle_irq_gic,
.init_machine = kota2_init,
.timer = &kota2_timer,

View file

@ -113,6 +113,12 @@ static struct clk main_clk = {
.ops = &main_clk_ops,
};
/* Divide Main clock by two */
static struct clk main_div2_clk = {
.ops = &div2_clk_ops,
.parent = &main_clk,
};
/* PLL0, PLL1, PLL2, PLL3 */
static unsigned long pll_recalc(struct clk *clk)
{
@ -181,6 +187,7 @@ static struct clk *main_clks[] = {
&extal1_div2_clk,
&extal2_div2_clk,
&main_clk,
&main_div2_clk,
&pll0_clk,
&pll1_clk,
&pll2_clk,
@ -243,7 +250,7 @@ static struct clk div6_clks[DIV6_NR] = {
[DIV6_VCK1] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR1, 0),
[DIV6_VCK2] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR2, 0),
[DIV6_VCK3] = SH_CLK_DIV6(&pll1_div2_clk, VCLKCR3, 0),
[DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, 0),
[DIV6_ZB1] = SH_CLK_DIV6(&pll1_div2_clk, ZBCKCR, CLK_ENABLE_ON_INIT),
[DIV6_FLCTL] = SH_CLK_DIV6(&pll1_div2_clk, FLCKCR, 0),
[DIV6_SDHI0] = SH_CLK_DIV6(&pll1_div2_clk, SD0CKCR, 0),
[DIV6_SDHI1] = SH_CLK_DIV6(&pll1_div2_clk, SD1CKCR, 0),
@ -268,6 +275,7 @@ enum { MSTP001,
MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
MSTP331, MSTP329, MSTP325, MSTP323, MSTP318,
MSTP314, MSTP313, MSTP312, MSTP311,
MSTP303, MSTP302, MSTP301, MSTP300,
MSTP411, MSTP410, MSTP403,
MSTP_NR };
@ -301,6 +309,10 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP313] = MSTP(&div6_clks[DIV6_SDHI1], SMSTPCR3, 13, 0), /* SDHI1 */
[MSTP312] = MSTP(&div4_clks[DIV4_HP], SMSTPCR3, 12, 0), /* MMCIF0 */
[MSTP311] = MSTP(&div6_clks[DIV6_SDHI2], SMSTPCR3, 11, 0), /* SDHI2 */
[MSTP303] = MSTP(&main_div2_clk, SMSTPCR3, 3, 0), /* TPU1 */
[MSTP302] = MSTP(&main_div2_clk, SMSTPCR3, 2, 0), /* TPU2 */
[MSTP301] = MSTP(&main_div2_clk, SMSTPCR3, 1, 0), /* TPU3 */
[MSTP300] = MSTP(&main_div2_clk, SMSTPCR3, 0, 0), /* TPU4 */
[MSTP411] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 11, 0), /* IIC3 */
[MSTP410] = MSTP(&div4_clks[DIV4_HP], SMSTPCR4, 10, 0), /* IIC4 */
[MSTP403] = MSTP(&r_clk, SMSTPCR4, 3, 0), /* KEYSC */
@ -350,6 +362,10 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[MSTP313]), /* SDHI1 */
CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[MSTP312]), /* MMCIF0 */
CLKDEV_DEV_ID("sh_mobile_sdhi.2", &mstp_clks[MSTP311]), /* SDHI2 */
CLKDEV_DEV_ID("leds-renesas-tpu.12", &mstp_clks[MSTP303]), /* TPU1 */
CLKDEV_DEV_ID("leds-renesas-tpu.21", &mstp_clks[MSTP302]), /* TPU2 */
CLKDEV_DEV_ID("leds-renesas-tpu.30", &mstp_clks[MSTP301]), /* TPU3 */
CLKDEV_DEV_ID("leds-renesas-tpu.41", &mstp_clks[MSTP300]), /* TPU4 */
CLKDEV_DEV_ID("i2c-sh_mobile.3", &mstp_clks[MSTP411]), /* I2C3 */
CLKDEV_DEV_ID("i2c-sh_mobile.4", &mstp_clks[MSTP410]), /* I2C4 */
CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[MSTP403]), /* KEYSC */

View file

@ -17,6 +17,7 @@
* the CPU clock speed on the fly.
*/
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/err.h>

View file

@ -32,6 +32,9 @@
#define MX3_PWMSAR 0x0C /* PWM Sample Register */
#define MX3_PWMPR 0x10 /* PWM Period Register */
#define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4)
#define MX3_PWMCR_DOZEEN (1 << 24)
#define MX3_PWMCR_WAITEN (1 << 23)
#define MX3_PWMCR_DBGEN (1 << 22)
#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
#define MX3_PWMCR_CLKSRC_IPG (1 << 16)
#define MX3_PWMCR_EN (1 << 0)
@ -77,7 +80,9 @@ int pwm_config(struct pwm_device *pwm, int duty_ns, int period_ns)
writel(duty_cycles, pwm->mmio_base + MX3_PWMSAR);
writel(period_cycles, pwm->mmio_base + MX3_PWMPR);
cr = MX3_PWMCR_PRESCALER(prescale) | MX3_PWMCR_EN;
cr = MX3_PWMCR_PRESCALER(prescale) |
MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
if (cpu_is_mx25())
cr |= MX3_PWMCR_CLKSRC_IPG;
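For illustration only, the control word assembled above can be reproduced in a small standalone program; the prescale value of 4 is made up, and only the register bits defined in this hunk are used:

#include <stdio.h>

#define MX3_PWMCR_PRESCALER(x)	(((x - 1) & 0xFFF) << 4)
#define MX3_PWMCR_DOZEEN	(1 << 24)
#define MX3_PWMCR_WAITEN	(1 << 23)
#define MX3_PWMCR_DBGEN		(1 << 22)
#define MX3_PWMCR_CLKSRC_IPG	(1 << 16)
#define MX3_PWMCR_EN		(1 << 0)

int main(void)
{
	unsigned long prescale = 4;	/* hypothetical prescaler value */
	unsigned long cr;

	cr = MX3_PWMCR_PRESCALER(prescale) |
	     MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
	     MX3_PWMCR_DBGEN | MX3_PWMCR_EN;
	cr |= MX3_PWMCR_CLKSRC_IPG;	/* clock source as chosen for i.MX25 above */

	printf("PWMCR = 0x%08lx\n", cr);	/* 0x01c10031 for prescale = 4 */
	return 0;
}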

View file

@ -15,7 +15,6 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/pwm_backlight.h>
#include <linux/slab.h>
#include <plat/devs.h>
#include <plat/gpio-cfg.h>

View file

@ -88,7 +88,7 @@ static ssize_t hwsampler_write(struct file *file, char const __user *buf,
return -EINVAL;
retval = oprofilefs_ulong_from_user(&val, buf, count);
if (retval)
if (retval <= 0)
return retval;
if (oprofile_started)

View file

@ -50,9 +50,9 @@ static struct platform_device heartbeat_device = {
#define GBECONT 0xffc10100
#define GBECONT_RMII1 BIT(17)
#define GBECONT_RMII0 BIT(16)
static void sh7757_eth_set_mdio_gate(unsigned long addr)
static void sh7757_eth_set_mdio_gate(void *addr)
{
if ((addr & 0x00000fff) < 0x0800)
if (((unsigned long)addr & 0x00000fff) < 0x0800)
writel(readl(GBECONT) | GBECONT_RMII0, GBECONT);
else
writel(readl(GBECONT) | GBECONT_RMII1, GBECONT);
@ -116,9 +116,9 @@ static struct platform_device sh7757_eth1_device = {
},
};
static void sh7757_eth_giga_set_mdio_gate(unsigned long addr)
static void sh7757_eth_giga_set_mdio_gate(void *addr)
{
if ((addr & 0x00000fff) < 0x0800) {
if (((unsigned long)addr & 0x00000fff) < 0x0800) {
gpio_set_value(GPIO_PTT4, 1);
writel(readl(GBECONT) & ~GBECONT_RMII0, GBECONT);
} else {
@ -210,8 +210,12 @@ static struct resource sh_mmcif_resources[] = {
};
static struct sh_mmcif_dma sh7757lcr_mmcif_dma = {
.chan_priv_tx = SHDMA_SLAVE_MMCIF_TX,
.chan_priv_rx = SHDMA_SLAVE_MMCIF_RX,
.chan_priv_tx = {
.slave_id = SHDMA_SLAVE_MMCIF_TX,
},
.chan_priv_rx = {
.slave_id = SHDMA_SLAVE_MMCIF_RX,
}
};
static struct sh_mmcif_plat_data sh_mmcif_plat = {

View file

@ -1181,13 +1181,11 @@ static int __devinit ds_probe(struct vio_dev *vdev,
dp->rcv_buf_len = 4096;
dp->ds_states = kzalloc(sizeof(ds_states_template),
GFP_KERNEL);
dp->ds_states = kmemdup(ds_states_template,
sizeof(ds_states_template), GFP_KERNEL);
if (!dp->ds_states)
goto out_free_rcv_buf;
memcpy(dp->ds_states, ds_states_template,
sizeof(ds_states_template));
dp->num_ds_states = ARRAY_SIZE(ds_states_template);
for (i = 0; i < dp->num_ds_states; i++)

View file

@ -849,10 +849,10 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
if (!irq)
return -ENOMEM;
if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
return -EINVAL;
if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
return -EINVAL;
if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
return -EINVAL;
return irq;
}

View file

@ -58,12 +58,10 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len
void *new_val;
int err;
new_val = kmalloc(len, GFP_KERNEL);
new_val = kmemdup(val, len, GFP_KERNEL);
if (!new_val)
return -ENOMEM;
memcpy(new_val, val, len);
err = -ENODEV;
mutex_lock(&of_set_property_mutex);

View file

@ -302,8 +302,7 @@ void __init btfixup(void)
case 'i': /* INT */
if ((insn & 0xc1c00000) == 0x01000000) /* %HI */
set_addr(addr, q[1], fmangled, (insn & 0xffc00000) | (p[1] >> 10));
else if ((insn & 0x80002000) == 0x80002000 &&
(insn & 0x01800000) != 0x01800000) /* %LO */
else if ((insn & 0x80002000) == 0x80002000) /* %LO */
set_addr(addr, q[1], fmangled, (insn & 0xffffe000) | (p[1] & 0x3ff));
else {
prom_printf(insn_i, p, addr, insn);

View file

@ -53,13 +53,6 @@
*/
#define E820_RESERVED_KERN 128
/*
* Address ranges that need to be mapped by the kernel direct
* mapping. This is used to make sure regions such as
* EFI_RUNTIME_SERVICES_DATA are directly mapped. See setup_arch().
*/
#define E820_RESERVED_EFI 129
#ifndef __ASSEMBLY__
#include <linux/types.h>
struct e820entry {
@ -122,7 +115,6 @@ static inline void early_memtest(unsigned long start, unsigned long end)
}
#endif
extern unsigned long e820_end_pfn(unsigned long limit_pfn, unsigned type);
extern unsigned long e820_end_of_ram_pfn(void);
extern unsigned long e820_end_of_low_ram_pfn(void);
extern u64 early_reserve_e820(u64 startt, u64 sizet, u64 align);

View file

@ -33,6 +33,8 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
#define efi_call_virt6(f, a1, a2, a3, a4, a5, a6) \
efi_call_virt(f, a1, a2, a3, a4, a5, a6)
#define efi_ioremap(addr, size, type) ioremap_cache(addr, size)
#else /* !CONFIG_X86_32 */
extern u64 efi_call0(void *fp);
@ -82,6 +84,9 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
(u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
u32 type);
#endif /* CONFIG_X86_32 */
extern int add_efi_memmap;

View file

@ -116,16 +116,16 @@ void show_registers(struct pt_regs *regs)
for (i = 0; i < code_len; i++, ip++) {
if (ip < (u8 *)PAGE_OFFSET ||
probe_kernel_address(ip, c)) {
printk(" Bad EIP value.");
printk(KERN_CONT " Bad EIP value.");
break;
}
if (ip == (u8 *)regs->ip)
printk("<%02x> ", c);
printk(KERN_CONT "<%02x> ", c);
else
printk("%02x ", c);
printk(KERN_CONT "%02x ", c);
}
}
printk("\n");
printk(KERN_CONT "\n");
}
int is_valid_bugaddr(unsigned long ip)

View file

@ -284,16 +284,16 @@ void show_registers(struct pt_regs *regs)
for (i = 0; i < code_len; i++, ip++) {
if (ip < (u8 *)PAGE_OFFSET ||
probe_kernel_address(ip, c)) {
printk(" Bad RIP value.");
printk(KERN_CONT " Bad RIP value.");
break;
}
if (ip == (u8 *)regs->ip)
printk("<%02x> ", c);
printk(KERN_CONT "<%02x> ", c);
else
printk("%02x ", c);
printk(KERN_CONT "%02x ", c);
}
}
printk("\n");
printk(KERN_CONT "\n");
}
int is_valid_bugaddr(unsigned long ip)

View file

@ -135,7 +135,6 @@ static void __init e820_print_type(u32 type)
printk(KERN_CONT "(usable)");
break;
case E820_RESERVED:
case E820_RESERVED_EFI:
printk(KERN_CONT "(reserved)");
break;
case E820_ACPI:
@ -784,7 +783,7 @@ u64 __init early_reserve_e820(u64 startt, u64 sizet, u64 align)
/*
* Find the highest page frame number we have available
*/
unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
{
int i;
unsigned long last_pfn = 0;

View file

@ -691,8 +691,6 @@ early_param("reservelow", parse_reservelow);
void __init setup_arch(char **cmdline_p)
{
unsigned long end_pfn;
#ifdef CONFIG_X86_32
memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
visws_early_detect();
@ -934,24 +932,7 @@ void __init setup_arch(char **cmdline_p)
init_gbpages();
/* max_pfn_mapped is updated here */
end_pfn = max_low_pfn;
#ifdef CONFIG_X86_64
/*
* There may be regions after the last E820_RAM region that we
* want to include in the kernel direct mapping, such as
* EFI_RUNTIME_SERVICES_DATA.
*/
if (efi_enabled) {
unsigned long efi_end;
efi_end = e820_end_pfn(MAXMEM>>PAGE_SHIFT, E820_RESERVED_EFI);
if (efi_end > max_low_pfn)
end_pfn = efi_end;
}
#endif
max_low_pfn_mapped = init_memory_mapping(0, end_pfn << PAGE_SHIFT);
max_low_pfn_mapped = init_memory_mapping(0, max_low_pfn<<PAGE_SHIFT);
max_pfn_mapped = max_low_pfn_mapped;
#ifdef CONFIG_X86_64

View file

@ -568,8 +568,8 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
break;
}
if (filter[i].jt != 0) {
if (filter[i].jf)
t_offset += is_near(f_offset) ? 2 : 6;
if (filter[i].jf && f_offset)
t_offset += is_near(f_offset) ? 2 : 5;
EMIT_COND_JMP(t_op, t_offset);
if (filter[i].jf)
EMIT_JMP(f_offset);

View file

@ -323,13 +323,10 @@ static void __init do_add_efi_memmap(void)
case EFI_UNUSABLE_MEMORY:
e820_type = E820_UNUSABLE;
break;
case EFI_RUNTIME_SERVICES_DATA:
e820_type = E820_RESERVED_EFI;
break;
default:
/*
* EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
* EFI_MEMORY_MAPPED_IO
* EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
* EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
*/
e820_type = E820_RESERVED;
@ -674,21 +671,10 @@ void __init efi_enter_virtual_mode(void)
end_pfn = PFN_UP(end);
if (end_pfn <= max_low_pfn_mapped
|| (end_pfn > (1UL << (32 - PAGE_SHIFT))
&& end_pfn <= max_pfn_mapped)) {
&& end_pfn <= max_pfn_mapped))
va = __va(md->phys_addr);
if (!(md->attribute & EFI_MEMORY_WB)) {
addr = (u64) (unsigned long)va;
npages = md->num_pages;
memrange_efi_to_native(&addr, &npages);
set_memory_uc(addr, npages);
}
} else {
if (!(md->attribute & EFI_MEMORY_WB))
va = ioremap_nocache(md->phys_addr, size);
else
va = ioremap_cache(md->phys_addr, size);
}
else
va = efi_ioremap(md->phys_addr, size, md->type);
md->virt_addr = (u64) (unsigned long) va;
@ -698,6 +684,13 @@ void __init efi_enter_virtual_mode(void)
continue;
}
if (!(md->attribute & EFI_MEMORY_WB)) {
addr = md->virt_addr;
npages = md->num_pages;
memrange_efi_to_native(&addr, &npages);
set_memory_uc(addr, npages);
}
systab = (u64) (unsigned long) efi_phys.systab;
if (md->phys_addr <= systab && systab < end) {
systab += md->virt_addr - md->phys_addr;

View file

@ -39,43 +39,14 @@
*/
static unsigned long efi_rt_eflags;
static pgd_t efi_bak_pg_dir_pointer[2];
void efi_call_phys_prelog(void)
{
unsigned long cr4;
unsigned long temp;
struct desc_ptr gdt_descr;
local_irq_save(efi_rt_eflags);
/*
* If I don't have PAE, I should just duplicate two entries in page
* directory. If I have PAE, I just need to duplicate one entry in
* page directory.
*/
cr4 = read_cr4_safe();
if (cr4 & X86_CR4_PAE) {
efi_bak_pg_dir_pointer[0].pgd =
swapper_pg_dir[pgd_index(0)].pgd;
swapper_pg_dir[0].pgd =
swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
} else {
efi_bak_pg_dir_pointer[0].pgd =
swapper_pg_dir[pgd_index(0)].pgd;
efi_bak_pg_dir_pointer[1].pgd =
swapper_pg_dir[pgd_index(0x400000)].pgd;
swapper_pg_dir[pgd_index(0)].pgd =
swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
temp = PAGE_OFFSET + 0x400000;
swapper_pg_dir[pgd_index(0x400000)].pgd =
swapper_pg_dir[pgd_index(temp)].pgd;
}
/*
* After the lock is released, the original page table is restored.
*/
load_cr3(initial_page_table);
__flush_tlb_all();
gdt_descr.address = __pa(get_cpu_gdt_table(0));
@ -85,28 +56,13 @@ void efi_call_phys_prelog(void)
void efi_call_phys_epilog(void)
{
unsigned long cr4;
struct desc_ptr gdt_descr;
gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
gdt_descr.size = GDT_SIZE - 1;
load_gdt(&gdt_descr);
cr4 = read_cr4_safe();
if (cr4 & X86_CR4_PAE) {
swapper_pg_dir[pgd_index(0)].pgd =
efi_bak_pg_dir_pointer[0].pgd;
} else {
swapper_pg_dir[pgd_index(0)].pgd =
efi_bak_pg_dir_pointer[0].pgd;
swapper_pg_dir[pgd_index(0x400000)].pgd =
efi_bak_pg_dir_pointer[1].pgd;
}
/*
* After the lock is released, the original page table is restored.
*/
load_cr3(swapper_pg_dir);
__flush_tlb_all();
local_irq_restore(efi_rt_eflags);

View file

@ -80,3 +80,20 @@ void __init efi_call_phys_epilog(void)
local_irq_restore(efi_flags);
early_code_mapping_set_exec(0);
}
void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
u32 type)
{
unsigned long last_map_pfn;
if (type == EFI_MEMORY_MAPPED_IO)
return ioremap(phys_addr, size);
last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
unsigned long top = last_map_pfn << PAGE_SHIFT;
efi_ioremap(top, size - (top - phys_addr), type);
}
return (void __iomem *)__va(phys_addr);
}

View file

@ -173,9 +173,21 @@ static unsigned long __init xen_get_max_pages(void)
domid_t domid = DOMID_SELF;
int ret;
ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
if (ret > 0)
max_pages = ret;
/*
* For the initial domain we use the maximum reservation as
* the maximum page.
*
* For guest domains the current maximum reservation reflects
* the current maximum rather than the static maximum. In this
* case the e820 map provided to us will cover the static
* maximum region.
*/
if (xen_initial_domain()) {
ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid);
if (ret > 0)
max_pages = ret;
}
return min(max_pages, MAX_DOMAIN_PAGES);
}

View file

@ -366,7 +366,14 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
if (drain_all)
blk_throtl_drain(q);
__blk_run_queue(q);
/*
* This function might be called on a queue which failed
* driver init after queue creation. Some drivers
* (e.g. fd) get unhappy in such cases. Kick queue iff
* dispatch queue has something on it.
*/
if (!list_empty(&q->queue_head))
__blk_run_queue(q);
if (drain_all)
nr_rqs = q->rq.count[0] + q->rq.count[1];
@ -467,6 +474,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
q->backing_dev_info.state = 0;
q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
q->backing_dev_info.name = "block";
q->node = node_id;
err = bdi_init(&q->backing_dev_info);
if (err) {
@ -551,7 +559,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!uninit_q)
return NULL;
q = blk_init_allocated_queue_node(uninit_q, rfn, lock, node_id);
q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
blk_cleanup_queue(uninit_q);
@ -562,19 +570,10 @@ EXPORT_SYMBOL(blk_init_queue_node);
struct request_queue *
blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
spinlock_t *lock)
{
return blk_init_allocated_queue_node(q, rfn, lock, -1);
}
EXPORT_SYMBOL(blk_init_allocated_queue);
struct request_queue *
blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
spinlock_t *lock, int node_id)
{
if (!q)
return NULL;
q->node = node_id;
if (blk_init_free_list(q))
return NULL;
@ -604,7 +603,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue_node);
EXPORT_SYMBOL(blk_init_allocated_queue);
int blk_get_queue(struct request_queue *q)
{

View file

@ -3184,7 +3184,7 @@ static int cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
}
}
if (ret)
if (ret && ret != -EEXIST)
printk(KERN_ERR "cfq: cic link failed!\n");
return ret;
@ -3200,6 +3200,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
{
struct io_context *ioc = NULL;
struct cfq_io_context *cic;
int ret;
might_sleep_if(gfp_mask & __GFP_WAIT);
@ -3207,6 +3208,7 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
if (!ioc)
return NULL;
retry:
cic = cfq_cic_lookup(cfqd, ioc);
if (cic)
goto out;
@ -3215,7 +3217,12 @@ cfq_get_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
if (cic == NULL)
goto err;
if (cfq_cic_link(cfqd, ioc, cic, gfp_mask))
ret = cfq_cic_link(cfqd, ioc, cic, gfp_mask);
if (ret == -EEXIST) {
/* someone has linked cic to ioc already */
cfq_cic_free(cic);
goto retry;
} else if (ret)
goto err_free;
out:
@ -4036,6 +4043,11 @@ static void *cfq_init_queue(struct request_queue *q)
if (blkio_alloc_blkg_stats(&cfqg->blkg)) {
kfree(cfqg);
spin_lock(&cic_index_lock);
ida_remove(&cic_index_ida, cfqd->cic_index);
spin_unlock(&cic_index_lock);
kfree(cfqd);
return NULL;
}

View file

@ -820,7 +820,7 @@ config PATA_PLATFORM
config PATA_OF_PLATFORM
tristate "OpenFirmware platform device PATA support"
depends on PATA_PLATFORM && OF
depends on PATA_PLATFORM && OF && OF_IRQ
help
This option enables support for generic directly connected ATA
devices commonly found on embedded systems with OpenFirmware

View file

@ -2601,6 +2601,8 @@ static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff,
c->Request.Timeout = 0;
c->Request.CDB[0] = BMIC_WRITE;
c->Request.CDB[6] = BMIC_CACHE_FLUSH;
c->Request.CDB[7] = (size >> 8) & 0xFF;
c->Request.CDB[8] = size & 0xFF;
break;
case TEST_UNIT_READY:
c->Request.CDBLen = 6;
@ -4880,7 +4882,7 @@ static int cciss_request_irq(ctlr_info_t *h,
{
if (h->msix_vector || h->msi_vector) {
if (!request_irq(h->intr[h->intr_mode], msixhandler,
IRQF_DISABLED, h->devname, h))
0, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get msi irq %d"
" for %s\n", h->intr[h->intr_mode],
@ -4889,7 +4891,7 @@ static int cciss_request_irq(ctlr_info_t *h,
}
if (!request_irq(h->intr[h->intr_mode], intxhandler,
IRQF_DISABLED, h->devname, h))
IRQF_SHARED, h->devname, h))
return 0;
dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n",
h->intr[h->intr_mode], h->devname);

View file

@ -422,7 +422,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
/*
* We use punch hole to reclaim the free space used by the
* image a.k.a. discard. However we do support discard if
* image a.k.a. discard. However we do not support discard if
* encryption is enabled, because it may give an attacker
* useful information.
*/
@ -797,7 +797,7 @@ static void loop_config_discard(struct loop_device *lo)
}
q->limits.discard_granularity = inode->i_sb->s_blocksize;
q->limits.discard_alignment = inode->i_sb->s_blocksize;
q->limits.discard_alignment = 0;
q->limits.max_discard_sectors = UINT_MAX >> 9;
q->limits.discard_zeroes_data = 1;
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);

View file

@ -183,10 +183,6 @@ static LIST_HEAD(rbd_client_list); /* clients */
static int __rbd_init_snaps_header(struct rbd_device *rbd_dev);
static void rbd_dev_release(struct device *dev);
static ssize_t rbd_snap_rollback(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t size);
static ssize_t rbd_snap_add(struct device *dev,
struct device_attribute *attr,
const char *buf,
@ -461,6 +457,10 @@ static int rbd_header_from_disk(struct rbd_image_header *header,
u32 snap_count = le32_to_cpu(ondisk->snap_count);
int ret = -ENOMEM;
if (memcmp(ondisk, RBD_HEADER_TEXT, sizeof(RBD_HEADER_TEXT))) {
return -ENXIO;
}
init_rwsem(&header->snap_rwsem);
header->snap_names_len = le64_to_cpu(ondisk->snap_names_len);
header->snapc = kmalloc(sizeof(struct ceph_snap_context) +
@ -1355,32 +1355,6 @@ fail:
return ret;
}
/*
* Request sync osd rollback
*/
static int rbd_req_sync_rollback_obj(struct rbd_device *dev,
u64 snapid,
const char *obj)
{
struct ceph_osd_req_op *ops;
int ret = rbd_create_rw_ops(&ops, 1, CEPH_OSD_OP_ROLLBACK, 0);
if (ret < 0)
return ret;
ops[0].snap.snapid = snapid;
ret = rbd_req_sync_op(dev, NULL,
CEPH_NOSNAP,
0,
CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK,
ops,
1, obj, 0, 0, NULL, NULL, NULL);
rbd_destroy_ops(ops);
return ret;
}
/*
* Request sync osd read
*/
@ -1610,8 +1584,13 @@ static int rbd_read_header(struct rbd_device *rbd_dev,
goto out_dh;
rc = rbd_header_from_disk(header, dh, snap_count, GFP_KERNEL);
if (rc < 0)
if (rc < 0) {
if (rc == -ENXIO) {
pr_warning("unrecognized header format"
" for image %s", rbd_dev->obj);
}
goto out_dh;
}
if (snap_count != header->total_snaps) {
snap_count = header->total_snaps;
@ -1882,7 +1861,6 @@ static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
static DEVICE_ATTR(create_snap, S_IWUSR, NULL, rbd_snap_add);
static DEVICE_ATTR(rollback_snap, S_IWUSR, NULL, rbd_snap_rollback);
static struct attribute *rbd_attrs[] = {
&dev_attr_size.attr,
@ -1893,7 +1871,6 @@ static struct attribute *rbd_attrs[] = {
&dev_attr_current_snap.attr,
&dev_attr_refresh.attr,
&dev_attr_create_snap.attr,
&dev_attr_rollback_snap.attr,
NULL
};
@ -2424,64 +2401,6 @@ err_unlock:
return ret;
}
static ssize_t rbd_snap_rollback(struct device *dev,
struct device_attribute *attr,
const char *buf,
size_t count)
{
struct rbd_device *rbd_dev = dev_to_rbd(dev);
int ret;
u64 snapid;
u64 cur_ofs;
char *seg_name = NULL;
char *snap_name = kmalloc(count + 1, GFP_KERNEL);
ret = -ENOMEM;
if (!snap_name)
return ret;
/* parse snaps add command */
snprintf(snap_name, count, "%s", buf);
seg_name = kmalloc(RBD_MAX_SEG_NAME_LEN + 1, GFP_NOIO);
if (!seg_name)
goto done;
mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
ret = snap_by_name(&rbd_dev->header, snap_name, &snapid, NULL);
if (ret < 0)
goto done_unlock;
dout("snapid=%lld\n", snapid);
cur_ofs = 0;
while (cur_ofs < rbd_dev->header.image_size) {
cur_ofs += rbd_get_segment(&rbd_dev->header,
rbd_dev->obj,
cur_ofs, (u64)-1,
seg_name, NULL);
dout("seg_name=%s\n", seg_name);
ret = rbd_req_sync_rollback_obj(rbd_dev, snapid, seg_name);
if (ret < 0)
pr_warning("could not roll back obj %s err=%d\n",
seg_name, ret);
}
ret = __rbd_update_snaps(rbd_dev);
if (ret < 0)
goto done_unlock;
ret = count;
done_unlock:
mutex_unlock(&ctl_mutex);
done:
kfree(seg_name);
kfree(snap_name);
return ret;
}
static struct bus_attribute rbd_bus_attrs[] = {
__ATTR(add, S_IWUSR, NULL, rbd_add),
__ATTR(remove, S_IWUSR, NULL, rbd_remove),

View file

@ -16,6 +16,8 @@
* handle GCR disks
*/
#undef DEBUG
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@ -36,13 +38,11 @@
#include <asm/machdep.h>
#include <asm/pmac_feature.h>
static DEFINE_MUTEX(swim3_mutex);
static struct request_queue *swim3_queue;
static struct gendisk *disks[2];
static struct request *fd_req;
#define MAX_FLOPPIES 2
static DEFINE_MUTEX(swim3_mutex);
static struct gendisk *disks[MAX_FLOPPIES];
enum swim_state {
idle,
locating,
@ -177,7 +177,6 @@ struct swim3 {
struct floppy_state {
enum swim_state state;
spinlock_t lock;
struct swim3 __iomem *swim3; /* hardware registers */
struct dbdma_regs __iomem *dma; /* DMA controller registers */
int swim3_intr; /* interrupt number for SWIM3 */
@ -204,8 +203,20 @@ struct floppy_state {
int wanted;
struct macio_dev *mdev;
char dbdma_cmd_space[5 * sizeof(struct dbdma_cmd)];
int index;
struct request *cur_req;
};
#define swim3_err(fmt, arg...) dev_err(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#define swim3_warn(fmt, arg...) dev_warn(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#define swim3_info(fmt, arg...) dev_info(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#ifdef DEBUG
#define swim3_dbg(fmt, arg...) dev_dbg(&fs->mdev->ofdev.dev, "[fd%d] " fmt, fs->index, arg)
#else
#define swim3_dbg(fmt, arg...) do { } while(0)
#endif
static struct floppy_state floppy_states[MAX_FLOPPIES];
static int floppy_count = 0;
static DEFINE_SPINLOCK(swim3_lock);
@ -224,17 +235,8 @@ static unsigned short write_postamble[] = {
0, 0, 0, 0, 0, 0
};
static void swim3_select(struct floppy_state *fs, int sel);
static void swim3_action(struct floppy_state *fs, int action);
static int swim3_readbit(struct floppy_state *fs, int bit);
static void do_fd_request(struct request_queue * q);
static void start_request(struct floppy_state *fs);
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long));
static void scan_track(struct floppy_state *fs);
static void seek_track(struct floppy_state *fs, int n);
static void init_dma(struct dbdma_cmd *cp, int cmd, void *buf, int count);
static void setup_transfer(struct floppy_state *fs);
static void act(struct floppy_state *fs);
static void scan_timeout(unsigned long data);
static void seek_timeout(unsigned long data);
@ -254,20 +256,23 @@ static unsigned int floppy_check_events(struct gendisk *disk,
unsigned int clearing);
static int floppy_revalidate(struct gendisk *disk);
static bool swim3_end_request(int err, unsigned int nr_bytes)
static bool swim3_end_request(struct floppy_state *fs, int err, unsigned int nr_bytes)
{
if (__blk_end_request(fd_req, err, nr_bytes))
struct request *req = fs->cur_req;
int rc;
swim3_dbg(" end request, err=%d nr_bytes=%d, cur_req=%p\n",
err, nr_bytes, req);
if (err)
nr_bytes = blk_rq_cur_bytes(req);
rc = __blk_end_request(req, err, nr_bytes);
if (rc)
return true;
fd_req = NULL;
fs->cur_req = NULL;
return false;
}
static bool swim3_end_request_cur(int err)
{
return swim3_end_request(err, blk_rq_cur_bytes(fd_req));
}
static void swim3_select(struct floppy_state *fs, int sel)
{
struct swim3 __iomem *sw = fs->swim3;
@ -303,50 +308,53 @@ static int swim3_readbit(struct floppy_state *fs, int bit)
return (stat & DATA) == 0;
}
static void do_fd_request(struct request_queue * q)
{
int i;
for(i=0; i<floppy_count; i++) {
struct floppy_state *fs = &floppy_states[i];
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD)
continue;
start_request(fs);
}
}
static void start_request(struct floppy_state *fs)
{
struct request *req;
unsigned long x;
swim3_dbg("start request, initial state=%d\n", fs->state);
if (fs->state == idle && fs->wanted) {
fs->state = available;
wake_up(&fs->wait);
return;
}
while (fs->state == idle) {
if (!fd_req) {
fd_req = blk_fetch_request(swim3_queue);
if (!fd_req)
swim3_dbg("start request, idle loop, cur_req=%p\n", fs->cur_req);
if (!fs->cur_req) {
fs->cur_req = blk_fetch_request(disks[fs->index]->queue);
swim3_dbg(" fetched request %p\n", fs->cur_req);
if (!fs->cur_req)
break;
}
req = fd_req;
#if 0
printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
req->rq_disk->disk_name, req->cmd,
(long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
printk(" errors=%d current_nr_sectors=%u\n",
req->errors, blk_rq_cur_sectors(req));
req = fs->cur_req;
if (fs->mdev->media_bay &&
check_media_bay(fs->mdev->media_bay) != MB_FD) {
swim3_dbg("%s", " media bay absent, dropping req\n");
swim3_end_request(fs, -ENODEV, 0);
continue;
}
#if 0 /* This is really too verbose */
swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
req->rq_disk->disk_name, req->cmd,
(long)blk_rq_pos(req), blk_rq_sectors(req),
req->buffer);
swim3_dbg(" errors=%d current_nr_sectors=%u\n",
req->errors, blk_rq_cur_sectors(req));
#endif
if (blk_rq_pos(req) >= fs->total_secs) {
swim3_end_request_cur(-EIO);
swim3_dbg(" pos out of bounds (%ld, max is %ld)\n",
(long)blk_rq_pos(req), (long)fs->total_secs);
swim3_end_request(fs, -EIO, 0);
continue;
}
if (fs->ejected) {
swim3_end_request_cur(-EIO);
swim3_dbg("%s", " disk ejected\n");
swim3_end_request(fs, -EIO, 0);
continue;
}
@ -354,7 +362,8 @@ static void start_request(struct floppy_state *fs)
if (fs->write_prot < 0)
fs->write_prot = swim3_readbit(fs, WRITE_PROT);
if (fs->write_prot) {
swim3_end_request_cur(-EIO);
swim3_dbg("%s", " try to write, disk write protected\n");
swim3_end_request(fs, -EIO, 0);
continue;
}
}
@ -369,7 +378,6 @@ static void start_request(struct floppy_state *fs)
x = ((long)blk_rq_pos(req)) % fs->secpercyl;
fs->head = x / fs->secpertrack;
fs->req_sector = x % fs->secpertrack + 1;
fd_req = req;
fs->state = do_transfer;
fs->retries = 0;
@ -377,12 +385,14 @@ static void start_request(struct floppy_state *fs)
}
}
static void do_fd_request(struct request_queue * q)
{
start_request(q->queuedata);
}
static void set_timeout(struct floppy_state *fs, int nticks,
void (*proc)(unsigned long))
{
unsigned long flags;
spin_lock_irqsave(&fs->lock, flags);
if (fs->timeout_pending)
del_timer(&fs->timeout);
fs->timeout.expires = jiffies + nticks;
@ -390,7 +400,6 @@ static void set_timeout(struct floppy_state *fs, int nticks,
fs->timeout.data = (unsigned long) fs;
add_timer(&fs->timeout);
fs->timeout_pending = 1;
spin_unlock_irqrestore(&fs->lock, flags);
}
static inline void scan_track(struct floppy_state *fs)
@ -442,40 +451,45 @@ static inline void setup_transfer(struct floppy_state *fs)
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_cmd *cp = fs->dma_cmd;
struct dbdma_regs __iomem *dr = fs->dma;
struct request *req = fs->cur_req;
if (blk_rq_cur_sectors(fd_req) <= 0) {
printk(KERN_ERR "swim3: transfer 0 sectors?\n");
if (blk_rq_cur_sectors(req) <= 0) {
swim3_warn("%s", "Transfer 0 sectors ?\n");
return;
}
if (rq_data_dir(fd_req) == WRITE)
if (rq_data_dir(req) == WRITE)
n = 1;
else {
n = fs->secpertrack - fs->req_sector + 1;
if (n > blk_rq_cur_sectors(fd_req))
n = blk_rq_cur_sectors(fd_req);
if (n > blk_rq_cur_sectors(req))
n = blk_rq_cur_sectors(req);
}
swim3_dbg(" setup xfer at sect %d (of %d) head %d for %d\n",
fs->req_sector, fs->secpertrack, fs->head, n);
fs->scount = n;
swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
out_8(&sw->sector, fs->req_sector);
out_8(&sw->nsect, n);
out_8(&sw->gap3, 0);
out_le32(&dr->cmdptr, virt_to_bus(cp));
if (rq_data_dir(fd_req) == WRITE) {
if (rq_data_dir(req) == WRITE) {
/* Set up 3 dma commands: write preamble, data, postamble */
init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
++cp;
init_dma(cp, OUTPUT_MORE, fd_req->buffer, 512);
init_dma(cp, OUTPUT_MORE, req->buffer, 512);
++cp;
init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
} else {
init_dma(cp, INPUT_LAST, fd_req->buffer, n * 512);
init_dma(cp, INPUT_LAST, req->buffer, n * 512);
}
++cp;
out_le16(&cp->command, DBDMA_STOP);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
in_8(&sw->error);
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
if (rq_data_dir(fd_req) == WRITE)
if (rq_data_dir(req) == WRITE)
out_8(&sw->control_bis, WRITE_SECTORS);
in_8(&sw->intr);
out_le32(&dr->control, (RUN << 16) | RUN);
@ -488,12 +502,16 @@ static inline void setup_transfer(struct floppy_state *fs)
static void act(struct floppy_state *fs)
{
for (;;) {
swim3_dbg(" act loop, state=%d, req_cyl=%d, cur_cyl=%d\n",
fs->state, fs->req_cyl, fs->cur_cyl);
switch (fs->state) {
case idle:
return; /* XXX shouldn't get here */
case locating:
if (swim3_readbit(fs, TRACK_ZERO)) {
swim3_dbg("%s", " locate track 0\n");
fs->cur_cyl = 0;
if (fs->req_cyl == 0)
fs->state = do_transfer;
@ -511,7 +529,7 @@ static void act(struct floppy_state *fs)
break;
}
if (fs->req_cyl == fs->cur_cyl) {
printk("whoops, seeking 0\n");
swim3_warn("%s", "Whoops, seeking 0\n");
fs->state = do_transfer;
break;
}
@ -527,7 +545,9 @@ static void act(struct floppy_state *fs)
case do_transfer:
if (fs->cur_cyl != fs->req_cyl) {
if (fs->retries > 5) {
swim3_end_request_cur(-EIO);
swim3_err("Wrong cylinder in transfer, want: %d got %d\n",
fs->req_cyl, fs->cur_cyl);
swim3_end_request(fs, -EIO, 0);
fs->state = idle;
return;
}
@ -542,7 +562,7 @@ static void act(struct floppy_state *fs)
return;
default:
printk(KERN_ERR"swim3: unknown state %d\n", fs->state);
swim3_err("Unknown state %d\n", fs->state);
return;
}
}
@ -552,59 +572,75 @@ static void scan_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
swim3_dbg("* scan timeout, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_ACTION | WRITE_SECTORS);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
fs->cur_cyl = -1;
if (fs->retries > 5) {
swim3_end_request_cur(-EIO);
swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
fs->state = jogging;
act(fs);
}
spin_unlock_irqrestore(&swim3_lock, flags);
}
static void seek_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
swim3_dbg("* seek timeout, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_8(&sw->control_bic, DO_SEEK);
out_8(&sw->select, RELAX);
out_8(&sw->intr_enable, 0);
printk(KERN_ERR "swim3: seek timeout\n");
swim3_end_request_cur(-EIO);
swim3_err("%s", "Seek timeout\n");
swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
}
static void settle_timeout(unsigned long data)
{
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
unsigned long flags;
swim3_dbg("* settle timeout, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
if (swim3_readbit(fs, SEEK_COMPLETE)) {
out_8(&sw->select, RELAX);
fs->state = locating;
act(fs);
return;
goto unlock;
}
out_8(&sw->select, RELAX);
if (fs->settle_time < 2*HZ) {
++fs->settle_time;
set_timeout(fs, 1, settle_timeout);
return;
goto unlock;
}
printk(KERN_ERR "swim3: seek settle timeout\n");
swim3_end_request_cur(-EIO);
swim3_err("%s", "Seek settle timeout\n");
swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
unlock:
spin_unlock_irqrestore(&swim3_lock, flags);
}
static void xfer_timeout(unsigned long data)
@ -612,8 +648,12 @@ static void xfer_timeout(unsigned long data)
struct floppy_state *fs = (struct floppy_state *) data;
struct swim3 __iomem *sw = fs->swim3;
struct dbdma_regs __iomem *dr = fs->dma;
unsigned long flags;
int n;
swim3_dbg("* xfer timeout, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
fs->timeout_pending = 0;
out_le32(&dr->control, RUN << 16);
/* We must wait a bit for dbdma to stop */
@ -622,12 +662,13 @@ static void xfer_timeout(unsigned long data)
out_8(&sw->intr_enable, 0);
out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
out_8(&sw->select, RELAX);
printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
(rq_data_dir(fd_req)==WRITE? "writ": "read"),
(long)blk_rq_pos(fd_req));
swim3_end_request_cur(-EIO);
swim3_err("Timeout %sing sector %ld\n",
(rq_data_dir(fs->cur_req)==WRITE? "writ": "read"),
(long)blk_rq_pos(fs->cur_req));
swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
spin_unlock_irqrestore(&swim3_lock, flags);
}
static irqreturn_t swim3_interrupt(int irq, void *dev_id)
@ -638,12 +679,17 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
int stat, resid;
struct dbdma_regs __iomem *dr;
struct dbdma_cmd *cp;
unsigned long flags;
struct request *req = fs->cur_req;
swim3_dbg("* interrupt, state=%d\n", fs->state);
spin_lock_irqsave(&swim3_lock, flags);
intr = in_8(&sw->intr);
err = (intr & ERROR_INTR)? in_8(&sw->error): 0;
if ((intr & ERROR_INTR) && fs->state != do_transfer)
printk(KERN_ERR "swim3_interrupt, state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(fd_req), intr, err);
swim3_err("Non-transfer error interrupt: state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(req), intr, err);
switch (fs->state) {
case locating:
if (intr & SEEN_SECTOR) {
@ -653,10 +699,10 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
del_timer(&fs->timeout);
fs->timeout_pending = 0;
if (sw->ctrack == 0xff) {
printk(KERN_ERR "swim3: seen sector but cyl=ff?\n");
swim3_err("%s", "Seen sector but cyl=ff?\n");
fs->cur_cyl = -1;
if (fs->retries > 5) {
swim3_end_request_cur(-EIO);
swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
} else {
@ -668,8 +714,8 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->cur_cyl = sw->ctrack;
fs->cur_sector = sw->csect;
if (fs->expect_cyl != -1 && fs->expect_cyl != fs->cur_cyl)
printk(KERN_ERR "swim3: expected cyl %d, got %d\n",
fs->expect_cyl, fs->cur_cyl);
swim3_err("Expected cyl %d, got %d\n",
fs->expect_cyl, fs->cur_cyl);
fs->state = do_transfer;
act(fs);
}
@ -704,7 +750,7 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
fs->timeout_pending = 0;
dr = fs->dma;
cp = fs->dma_cmd;
if (rq_data_dir(fd_req) == WRITE)
if (rq_data_dir(req) == WRITE)
++cp;
/*
* Check that the main data transfer has finished.
@ -729,31 +775,32 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
if (intr & ERROR_INTR) {
n = fs->scount - 1 - resid / 512;
if (n > 0) {
blk_update_request(fd_req, 0, n << 9);
blk_update_request(req, 0, n << 9);
fs->req_sector += n;
}
if (fs->retries < 5) {
++fs->retries;
act(fs);
} else {
printk("swim3: error %sing block %ld (err=%x)\n",
rq_data_dir(fd_req) == WRITE? "writ": "read",
(long)blk_rq_pos(fd_req), err);
swim3_end_request_cur(-EIO);
swim3_err("Error %sing block %ld (err=%x)\n",
rq_data_dir(req) == WRITE? "writ": "read",
(long)blk_rq_pos(req), err);
swim3_end_request(fs, -EIO, 0);
fs->state = idle;
}
} else {
if ((stat & ACTIVE) == 0 || resid != 0) {
/* musta been an error */
printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid);
printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(fd_req), intr, err);
swim3_end_request_cur(-EIO);
swim3_err("fd dma error: stat=%x resid=%d\n", stat, resid);
swim3_err(" state=%d, dir=%x, intr=%x, err=%x\n",
fs->state, rq_data_dir(req), intr, err);
swim3_end_request(fs, -EIO, 0);
fs->state = idle;
start_request(fs);
break;
}
if (swim3_end_request(0, fs->scount << 9)) {
fs->retries = 0;
if (swim3_end_request(fs, 0, fs->scount << 9)) {
fs->req_sector += fs->scount;
if (fs->req_sector > fs->secpertrack) {
fs->req_sector -= fs->secpertrack;
@ -770,8 +817,9 @@ static irqreturn_t swim3_interrupt(int irq, void *dev_id)
start_request(fs);
break;
default:
printk(KERN_ERR "swim3: don't know what to do in state %d\n", fs->state);
swim3_err("Don't know what to do in state %d\n", fs->state);
}
spin_unlock_irqrestore(&swim3_lock, flags);
return IRQ_HANDLED;
}
@ -781,26 +829,31 @@ static void fd_dma_interrupt(int irq, void *dev_id)
}
*/
/* Called under the mutex to grab exclusive access to a drive */
static int grab_drive(struct floppy_state *fs, enum swim_state state,
int interruptible)
{
unsigned long flags;
spin_lock_irqsave(&fs->lock, flags);
if (fs->state != idle) {
swim3_dbg("%s", "-> grab drive\n");
spin_lock_irqsave(&swim3_lock, flags);
if (fs->state != idle && fs->state != available) {
++fs->wanted;
while (fs->state != available) {
spin_unlock_irqrestore(&swim3_lock, flags);
if (interruptible && signal_pending(current)) {
--fs->wanted;
spin_unlock_irqrestore(&fs->lock, flags);
return -EINTR;
}
interruptible_sleep_on(&fs->wait);
spin_lock_irqsave(&swim3_lock, flags);
}
--fs->wanted;
}
fs->state = state;
spin_unlock_irqrestore(&fs->lock, flags);
spin_unlock_irqrestore(&swim3_lock, flags);
return 0;
}
@ -808,10 +861,12 @@ static void release_drive(struct floppy_state *fs)
{
unsigned long flags;
spin_lock_irqsave(&fs->lock, flags);
swim3_dbg("%s", "-> release drive\n");
spin_lock_irqsave(&swim3_lock, flags);
fs->state = idle;
start_request(fs);
spin_unlock_irqrestore(&fs->lock, flags);
spin_unlock_irqrestore(&swim3_lock, flags);
}
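/*
 * Editorial sketch (not part of the patch): grab_drive()/release_drive()
 * are used by the ioctl and eject paths to claim the drive in a named
 * state and then let queued requests run again.  "ejecting" stands for
 * one of the driver's swim_state values; example_use_drive() itself is
 * illustrative only.
 */
static int example_use_drive(struct floppy_state *fs)
{
	int err;

	err = grab_drive(fs, ejecting, 1);	/* interruptible: may return -EINTR */
	if (err)
		return err;
	/* ... issue the controller commands for this state ... */
	release_drive(fs);		/* back to idle; start_request() restarts the queue */
	return 0;
}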
static int fd_eject(struct floppy_state *fs)
@ -966,6 +1021,7 @@ static int floppy_release(struct gendisk *disk, fmode_t mode)
{
struct floppy_state *fs = disk->private_data;
struct swim3 __iomem *sw = fs->swim3;
mutex_lock(&swim3_mutex);
if (fs->ref_count > 0 && --fs->ref_count == 0) {
swim3_action(fs, MOTOR_OFF);
@ -1031,30 +1087,48 @@ static const struct block_device_operations floppy_fops = {
.revalidate_disk= floppy_revalidate,
};
static void swim3_mb_event(struct macio_dev* mdev, int mb_state)
{
struct floppy_state *fs = macio_get_drvdata(mdev);
struct swim3 __iomem *sw = fs->swim3;
if (!fs)
return;
if (mb_state != MB_FD)
return;
/* Clear state */
out_8(&sw->intr_enable, 0);
in_8(&sw->intr);
in_8(&sw->error);
}
static int swim3_add_device(struct macio_dev *mdev, int index)
{
struct device_node *swim = mdev->ofdev.dev.of_node;
struct floppy_state *fs = &floppy_states[index];
int rc = -EBUSY;
/* Do this first for message macros */
memset(fs, 0, sizeof(*fs));
fs->mdev = mdev;
fs->index = index;
/* Check & Request resources */
if (macio_resource_count(mdev) < 2) {
printk(KERN_WARNING "ifd%d: no address for %s\n",
index, swim->full_name);
swim3_err("%s", "No address in device-tree\n");
return -ENXIO;
}
if (macio_irq_count(mdev) < 2) {
printk(KERN_WARNING "fd%d: no intrs for device %s\n",
index, swim->full_name);
if (macio_irq_count(mdev) < 1) {
swim3_err("%s", "No interrupt in device-tree\n");
return -ENXIO;
}
if (macio_request_resource(mdev, 0, "swim3 (mmio)")) {
printk(KERN_ERR "fd%d: can't request mmio resource for %s\n",
index, swim->full_name);
swim3_err("%s", "Can't request mmio resource\n");
return -EBUSY;
}
if (macio_request_resource(mdev, 1, "swim3 (dma)")) {
printk(KERN_ERR "fd%d: can't request dma resource for %s\n",
index, swim->full_name);
swim3_err("%s", "Can't request dma resource\n");
macio_release_resource(mdev, 0);
return -EBUSY;
}
@ -1063,22 +1137,18 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
if (mdev->media_bay == NULL)
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 1);
memset(fs, 0, sizeof(*fs));
spin_lock_init(&fs->lock);
fs->state = idle;
fs->swim3 = (struct swim3 __iomem *)
ioremap(macio_resource_start(mdev, 0), 0x200);
if (fs->swim3 == NULL) {
printk("fd%d: couldn't map registers for %s\n",
index, swim->full_name);
swim3_err("%s", "Couldn't map mmio registers\n");
rc = -ENOMEM;
goto out_release;
}
fs->dma = (struct dbdma_regs __iomem *)
ioremap(macio_resource_start(mdev, 1), 0x200);
if (fs->dma == NULL) {
printk("fd%d: couldn't map DMA for %s\n",
index, swim->full_name);
swim3_err("%s", "Couldn't map dma registers\n");
iounmap(fs->swim3);
rc = -ENOMEM;
goto out_release;
@ -1090,31 +1160,25 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
fs->secpercyl = 36;
fs->secpertrack = 18;
fs->total_secs = 2880;
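/*
 * Geometry note (editorial, not part of the patch): these constants
 * describe a 1.44 MB HD floppy:
 *   80 cylinders x 2 heads x 18 sectors x 512 bytes = 1474560 bytes
 *   total_secs  = 80 * 2 * 18 = 2880
 *   secpercyl   =  2 * 18     = 36
 *   secpertrack = 18
 */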
fs->mdev = mdev;
init_waitqueue_head(&fs->wait);
fs->dma_cmd = (struct dbdma_cmd *) DBDMA_ALIGN(fs->dbdma_cmd_space);
memset(fs->dma_cmd, 0, 2 * sizeof(struct dbdma_cmd));
st_le16(&fs->dma_cmd[1].command, DBDMA_STOP);
if (mdev->media_bay == NULL || check_media_bay(mdev->media_bay) == MB_FD)
swim3_mb_event(mdev, MB_FD);
if (request_irq(fs->swim3_intr, swim3_interrupt, 0, "SWIM3", fs)) {
printk(KERN_ERR "fd%d: couldn't request irq %d for %s\n",
index, fs->swim3_intr, swim->full_name);
swim3_err("%s", "Couldn't request interrupt\n");
pmac_call_feature(PMAC_FTR_SWIM3_ENABLE, swim, 0, 0);
goto out_unmap;
return -EBUSY;
}
/*
if (request_irq(fs->dma_intr, fd_dma_interrupt, 0, "SWIM3-dma", fs)) {
printk(KERN_ERR "Couldn't get irq %d for SWIM3 DMA",
fs->dma_intr);
return -EBUSY;
}
*/
init_timer(&fs->timeout);
printk(KERN_INFO "fd%d: SWIM3 floppy controller %s\n", floppy_count,
swim3_info("SWIM3 floppy controller %s\n",
mdev->media_bay ? "in media bay" : "");
return 0;
@ -1132,41 +1196,42 @@ static int swim3_add_device(struct macio_dev *mdev, int index)
static int __devinit swim3_attach(struct macio_dev *mdev, const struct of_device_id *match)
{
int i, rc;
struct gendisk *disk;
int index, rc;
index = floppy_count++;
if (index >= MAX_FLOPPIES)
return -ENXIO;
/* Add the drive */
rc = swim3_add_device(mdev, floppy_count);
rc = swim3_add_device(mdev, index);
if (rc)
return rc;
/* Now register that disk. Same comment about failure handling */
disk = disks[index] = alloc_disk(1);
if (disk == NULL)
return -ENOMEM;
disk->queue = blk_init_queue(do_fd_request, &swim3_lock);
if (disk->queue == NULL) {
put_disk(disk);
return -ENOMEM;
}
disk->queue->queuedata = &floppy_states[index];
/* Now create the queue if not there yet */
if (swim3_queue == NULL) {
if (index == 0) {
/* If we failed, there isn't much we can do as the driver is still
* too dumb to remove the device, just bail out
*/
if (register_blkdev(FLOPPY_MAJOR, "fd"))
return 0;
swim3_queue = blk_init_queue(do_fd_request, &swim3_lock);
if (swim3_queue == NULL) {
unregister_blkdev(FLOPPY_MAJOR, "fd");
return 0;
}
}
/* Now register that disk. Same comment about failure handling */
i = floppy_count++;
disk = disks[i] = alloc_disk(1);
if (disk == NULL)
return 0;
disk->major = FLOPPY_MAJOR;
disk->first_minor = i;
disk->first_minor = index;
disk->fops = &floppy_fops;
disk->private_data = &floppy_states[i];
disk->queue = swim3_queue;
disk->private_data = &floppy_states[index];
disk->flags |= GENHD_FL_REMOVABLE;
sprintf(disk->disk_name, "fd%d", i);
sprintf(disk->disk_name, "fd%d", index);
set_capacity(disk, 2880);
add_disk(disk);
@ -1194,6 +1259,9 @@ static struct macio_driver swim3_driver =
.of_match_table = swim3_match,
},
.probe = swim3_attach,
#ifdef CONFIG_PMAC_MEDIABAY
.mediabay_event = swim3_mb_event,
#endif
#if 0
.suspend = swim3_suspend,
.resume = swim3_resume,


@ -188,7 +188,7 @@ config BT_MRVL
The core driver to support Marvell Bluetooth devices.
This driver is required if you want to support
Marvell Bluetooth devices, such as 8688/8787.
Marvell Bluetooth devices, such as 8688/8787/8797.
Say Y here to compile the Marvell Bluetooth driver
into the kernel, or say M to compile it as a module.
@ -201,8 +201,8 @@ config BT_MRVL_SDIO
The driver for Marvell Bluetooth chipsets with SDIO interface.
This driver is required if you want to use Marvell Bluetooth
devices with SDIO interface. Currently SD8688/SD8787 chipsets are
supported.
devices with SDIO interface. Currently SD8688/SD8787/SD8797
chipsets are supported.
Say Y here to compile support for the Marvell BT-over-SDIO
driver into the kernel, or say M to compile it as a module.


@ -65,7 +65,7 @@ static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = {
.io_port_1 = 0x01,
.io_port_2 = 0x02,
};
static const struct btmrvl_sdio_card_reg btmrvl_reg_8787 = {
static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = {
.cfg = 0x00,
.host_int_mask = 0x02,
.host_intstatus = 0x03,
@ -92,7 +92,14 @@ static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = {
static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = {
.helper = NULL,
.firmware = "mrvl/sd8787_uapsta.bin",
.reg = &btmrvl_reg_8787,
.reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = {
.helper = NULL,
.firmware = "mrvl/sd8797_uapsta.bin",
.reg = &btmrvl_reg_87xx,
.sd_blksz_fw_dl = 256,
};
@ -103,6 +110,9 @@ static const struct sdio_device_id btmrvl_sdio_ids[] = {
/* Marvell SD8787 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8787 },
/* Marvell SD8797 Bluetooth device */
{ SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A),
.driver_data = (unsigned long) &btmrvl_sdio_sd8797 },
{ } /* Terminating entry */
};
@ -1076,3 +1086,4 @@ MODULE_LICENSE("GPL v2");
MODULE_FIRMWARE("sd8688_helper.bin");
MODULE_FIRMWARE("sd8688.bin");
MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin");
MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin");


@ -777,9 +777,8 @@ skip_waking:
usb_mark_last_busy(data->udev);
}
usb_free_urb(urb);
done:
usb_free_urb(urb);
return err;
}


@ -139,6 +139,8 @@
#define IPMI_WDOG_SET_TIMER 0x24
#define IPMI_WDOG_GET_TIMER 0x25
#define IPMI_WDOG_TIMER_NOT_INIT_RESP 0x80
/* These are here until the real ones get into the watchdog.h interface. */
#ifndef WDIOC_GETTIMEOUT
#define WDIOC_GETTIMEOUT _IOW(WATCHDOG_IOCTL_BASE, 20, int)
@ -596,6 +598,7 @@ static int ipmi_heartbeat(void)
struct kernel_ipmi_msg msg;
int rv;
struct ipmi_system_interface_addr addr;
int timeout_retries = 0;
if (ipmi_ignore_heartbeat)
return 0;
@ -616,6 +619,7 @@ static int ipmi_heartbeat(void)
mutex_lock(&heartbeat_lock);
restart:
atomic_set(&heartbeat_tofree, 2);
/*
@ -653,7 +657,33 @@ static int ipmi_heartbeat(void)
/* Wait for the heartbeat to be sent. */
wait_for_completion(&heartbeat_wait);
if (heartbeat_recv_msg.msg.data[0] != 0) {
if (heartbeat_recv_msg.msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP) {
timeout_retries++;
if (timeout_retries > 3) {
printk(KERN_ERR PFX ": Unable to restore the IPMI"
" watchdog's settings, giving up.\n");
rv = -EIO;
goto out_unlock;
}
/*
* The timer was not initialized, which means the BMC was
* probably reset and lost its watchdog information. Attempt
* to restore the timer's settings. Note that we still hold
* the heartbeat lock, to keep a heartbeat from happening
* during this process, so we must request no heartbeat here
* to avoid deadlocking on this mutex.
*/
rv = ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB);
if (rv) {
printk(KERN_ERR PFX ": Unable to send the command to"
" set the watchdog's settings, giving up.\n");
goto out_unlock;
}
/* We might need a new heartbeat, so do it now */
goto restart;
} else if (heartbeat_recv_msg.msg.data[0] != 0) {
/*
* Got an error in the heartbeat response. It was already
* reported in ipmi_wdog_msg_handler, but we should return
@ -662,6 +692,7 @@ static int ipmi_heartbeat(void)
rv = -EINVAL;
}
out_unlock:
mutex_unlock(&heartbeat_lock);
return rv;
@ -922,11 +953,15 @@ static struct miscdevice ipmi_wdog_miscdev = {
static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg,
void *handler_data)
{
if (msg->msg.data[0] != 0) {
if (msg->msg.cmd == IPMI_WDOG_RESET_TIMER &&
msg->msg.data[0] == IPMI_WDOG_TIMER_NOT_INIT_RESP)
printk(KERN_INFO PFX "response: The IPMI controller appears"
" to have been reset, will attempt to reinitialize"
" the watchdog timer\n");
else if (msg->msg.data[0] != 0)
printk(KERN_ERR PFX "response: Error %x on cmd %x\n",
msg->msg.data[0],
msg->msg.cmd);
}
ipmi_free_recv_msg(msg);
}


@ -746,6 +746,37 @@ static void __exit ibft_exit(void)
ibft_cleanup();
}
#ifdef CONFIG_ACPI
static const struct {
char *sign;
} ibft_signs[] = {
/*
* One spec says "IBFT", the other says "iBFT". We have to check
* for both.
*/
{ ACPI_SIG_IBFT },
{ "iBFT" },
};
static void __init acpi_find_ibft_region(void)
{
int i;
struct acpi_table_header *table = NULL;
if (acpi_disabled)
return;
for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++) {
acpi_get_table(ibft_signs[i].sign, 0, &table);
ibft_addr = (struct acpi_table_ibft *)table;
}
}
#else
static void __init acpi_find_ibft_region(void)
{
}
#endif
/*
* ibft_init() - creates sysfs tree entries for the iBFT data.
*/
@ -753,9 +784,16 @@ static int __init ibft_init(void)
{
int rc = 0;
/*
On UEFI systems, setup_arch()/find_ibft_region() runs before
the ACPI tables are parsed, and it only does the legacy
memory scan.
*/
if (!ibft_addr)
acpi_find_ibft_region();
if (ibft_addr) {
printk(KERN_INFO "iBFT detected at 0x%llx.\n",
(u64)isa_virt_to_bus(ibft_addr));
pr_info("iBFT detected.\n");
rc = ibft_check_device();
if (rc)


@ -45,13 +45,6 @@ EXPORT_SYMBOL_GPL(ibft_addr);
static const struct {
char *sign;
} ibft_signs[] = {
#ifdef CONFIG_ACPI
/*
* One spec says "IBFT", the other says "iBFT". We have to check
* for both.
*/
{ ACPI_SIG_IBFT },
#endif
{ "iBFT" },
{ "BIFT" }, /* Broadcom iSCSI Offload */
};
@ -62,14 +55,6 @@ static const struct {
#define VGA_MEM 0xA0000 /* VGA buffer */
#define VGA_SIZE 0x20000 /* 128kB */
#ifdef CONFIG_ACPI
static int __init acpi_find_ibft(struct acpi_table_header *header)
{
ibft_addr = (struct acpi_table_ibft *)header;
return 0;
}
#endif /* CONFIG_ACPI */
static int __init find_ibft_in_mem(void)
{
unsigned long pos;
@ -94,6 +79,7 @@ static int __init find_ibft_in_mem(void)
* the table cannot be valid. */
if (pos + len <= (IBFT_END-1)) {
ibft_addr = (struct acpi_table_ibft *)virt;
pr_info("iBFT found at 0x%lx.\n", pos);
goto done;
}
}
@ -108,20 +94,12 @@ done:
*/
unsigned long __init find_ibft_region(unsigned long *sizep)
{
#ifdef CONFIG_ACPI
int i;
#endif
ibft_addr = NULL;
#ifdef CONFIG_ACPI
for (i = 0; i < ARRAY_SIZE(ibft_signs) && !ibft_addr; i++)
acpi_table_parse(ibft_signs[i].sign, acpi_find_ibft);
#endif /* CONFIG_ACPI */
/* iBFT 1.03 section 1.4.3.1 mandates that UEFI machines will
* only use ACPI for this */
if (!ibft_addr && !efi_enabled)
if (!efi_enabled)
find_ibft_in_mem();
if (ibft_addr) {


@ -138,6 +138,7 @@ config GPIO_MXS
config GPIO_PL061
bool "PrimeCell PL061 GPIO support"
depends on ARM_AMBA
select GENERIC_IRQ_CHIP
help
Say yes here to support the PrimeCell PL061 GPIO device


@ -22,7 +22,6 @@
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/reg.h>
#include <linux/mfd/da9052/pdata.h>
#include <linux/mfd/da9052/gpio.h>
#define DA9052_INPUT 1
#define DA9052_OUTPUT_OPENDRAIN 2
@ -43,6 +42,9 @@
#define DA9052_GPIO_MASK_UPPER_NIBBLE 0xF0
#define DA9052_GPIO_MASK_LOWER_NIBBLE 0x0F
#define DA9052_GPIO_NIBBLE_SHIFT 4
#define DA9052_IRQ_GPI0 16
#define DA9052_GPIO_ODD_SHIFT 7
#define DA9052_GPIO_EVEN_SHIFT 3
struct da9052_gpio {
struct da9052 *da9052;
@ -104,33 +106,26 @@ static int da9052_gpio_get(struct gpio_chip *gc, unsigned offset)
static void da9052_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
struct da9052_gpio *gpio = to_da9052_gpio(gc);
unsigned char register_value = 0;
int ret;
if (da9052_gpio_port_odd(offset)) {
if (value) {
register_value = DA9052_GPIO_ODD_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_ODD_PORT_MODE,
register_value);
value << DA9052_GPIO_ODD_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio odd reg,%d",
ret);
}
} else {
if (value) {
register_value = DA9052_GPIO_EVEN_PORT_MODE;
ret = da9052_reg_update(gpio->da9052, (offset >> 1) +
DA9052_GPIO_0_1_REG,
DA9052_GPIO_EVEN_PORT_MODE,
register_value);
value << DA9052_GPIO_EVEN_SHIFT);
if (ret != 0)
dev_err(gpio->da9052->dev,
"Failed to updated gpio even reg,%d",
ret);
}
}
}
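/*
 * Register-layout note (editorial, not part of the patch): each DA9052
 * GPIO register covers two ports, so port N is addressed as
 * DA9052_GPIO_0_1_REG + (N >> 1).  The mode bit for odd ports sits at
 * bit 7 (DA9052_GPIO_ODD_SHIFT) and for even ports at bit 3
 * (DA9052_GPIO_EVEN_SHIFT), which is why the value is shifted by those
 * amounts before being passed to da9052_reg_update().
 */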
@ -201,9 +196,9 @@ static struct gpio_chip reference_gp __devinitdata = {
.direction_input = da9052_gpio_direction_input,
.direction_output = da9052_gpio_direction_output,
.to_irq = da9052_gpio_to_irq,
.can_sleep = 1;
.ngpio = 16;
.base = -1;
.can_sleep = 1,
.ngpio = 16,
.base = -1,
};
static int __devinit da9052_gpio_probe(struct platform_device *pdev)


@ -332,6 +332,34 @@ static void ioh_irq_mask(struct irq_data *d)
&chip->reg->regs[chip->ch].imask);
}
static void ioh_irq_disable(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct ioh_gpio *chip = gc->private;
unsigned long flags;
u32 ien;
spin_lock_irqsave(&chip->spinlock, flags);
ien = ioread32(&chip->reg->regs[chip->ch].ien);
ien &= ~(1 << (d->irq - chip->irq_base));
iowrite32(ien, &chip->reg->regs[chip->ch].ien);
spin_unlock_irqrestore(&chip->spinlock, flags);
}
static void ioh_irq_enable(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct ioh_gpio *chip = gc->private;
unsigned long flags;
u32 ien;
spin_lock_irqsave(&chip->spinlock, flags);
ien = ioread32(&chip->reg->regs[chip->ch].ien);
ien |= 1 << (d->irq - chip->irq_base);
iowrite32(ien, &chip->reg->regs[chip->ch].ien);
spin_unlock_irqrestore(&chip->spinlock, flags);
}
static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
{
struct ioh_gpio *chip = dev_id;
@ -339,7 +367,7 @@ static irqreturn_t ioh_gpio_handler(int irq, void *dev_id)
int i, j;
int ret = IRQ_NONE;
for (i = 0; i < 8; i++) {
for (i = 0; i < 8; i++, chip++) {
reg_val = ioread32(&chip->reg->regs[i].istatus);
for (j = 0; j < num_ports[i]; j++) {
if (reg_val & BIT(j)) {
@ -370,6 +398,8 @@ static __devinit void ioh_gpio_alloc_generic_chip(struct ioh_gpio *chip,
ct->chip.irq_mask = ioh_irq_mask;
ct->chip.irq_unmask = ioh_irq_unmask;
ct->chip.irq_set_type = ioh_irq_type;
ct->chip.irq_disable = ioh_irq_disable;
ct->chip.irq_enable = ioh_irq_enable;
irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
IRQ_NOREQUEST | IRQ_NOPROBE, 0);


@ -132,6 +132,15 @@ static int mpc8xxx_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val
return 0;
}
static int mpc5121_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
{
/* GPIO 28..31 are input only on MPC5121 */
if (gpio >= 28)
return -EINVAL;
return mpc8xxx_gpio_dir_out(gc, gpio, val);
}
static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct of_mm_gpio_chip *mm = to_of_mm_gpio_chip(gc);
@ -340,11 +349,10 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
mm_gc->save_regs = mpc8xxx_gpio_save_regs;
gc->ngpio = MPC8XXX_GPIO_PINS;
gc->direction_input = mpc8xxx_gpio_dir_in;
gc->direction_output = mpc8xxx_gpio_dir_out;
if (of_device_is_compatible(np, "fsl,mpc8572-gpio"))
gc->get = mpc8572_gpio_get;
else
gc->get = mpc8xxx_gpio_get;
gc->direction_output = of_device_is_compatible(np, "fsl,mpc5121-gpio") ?
mpc5121_gpio_dir_out : mpc8xxx_gpio_dir_out;
gc->get = of_device_is_compatible(np, "fsl,mpc8572-gpio") ?
mpc8572_gpio_get : mpc8xxx_gpio_get;
gc->set = mpc8xxx_gpio_set;
gc->to_irq = mpc8xxx_gpio_to_irq;


@ -12,7 +12,6 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
@ -24,6 +23,7 @@
#include <linux/amba/pl061.h>
#include <linux/slab.h>
#include <linux/pm.h>
#include <asm/mach/irq.h>
#define GPIODIR 0x400
#define GPIOIS 0x404
@ -48,23 +48,16 @@ struct pl061_context_save_regs {
#endif
struct pl061_gpio {
/* We use a list of pl061_gpio structs for each trigger IRQ in the main
* interrupt controller of the system. We need this to support systems
* in which more than one PL061 is connected to the same IRQ. The ISR
* iterates through this list to find the source of the interrupt.
*/
struct list_head list;
/* Each of the two spinlocks protects a different set of hardware
* registers and data structures. This decouples the code of the IRQ from
* the GPIO code. This also makes the case of a GPIO routine call from
* the IRQ code simpler.
*/
spinlock_t lock; /* GPIO registers */
spinlock_t irq_lock; /* IRQ registers */
void __iomem *base;
unsigned irq_base;
int irq_base;
struct irq_chip_generic *irq_gc;
struct gpio_chip gc;
#ifdef CONFIG_PM
@ -134,46 +127,16 @@ static int pl061_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct pl061_gpio *chip = container_of(gc, struct pl061_gpio, gc);
if (chip->irq_base == NO_IRQ)
if (chip->irq_base <= 0)
return -EINVAL;
return chip->irq_base + offset;
}
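/*
 * Editorial sketch (not part of the patch): pl061_to_irq() is the
 * gpiolib .to_irq hook, so consumers reach these interrupts through the
 * usual gpio_to_irq()/request_irq() path.  The handler, trigger type and
 * label below are illustrative only.
 */
static int example_request_pl061_irq(unsigned gpio, irq_handler_t handler,
				     void *dev_id)
{
	int irq = gpio_to_irq(gpio);	/* ends up in pl061_to_irq() */

	if (irq < 0)
		return irq;		/* -EINVAL when no irq_base was set */

	return request_irq(irq, handler, IRQF_TRIGGER_RISING,
			   "pl061-example", dev_id);
}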
/*
* PL061 GPIO IRQ
*/
static void pl061_irq_disable(struct irq_data *d)
{
struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
int offset = d->irq - chip->irq_base;
unsigned long flags;
u8 gpioie;
spin_lock_irqsave(&chip->irq_lock, flags);
gpioie = readb(chip->base + GPIOIE);
gpioie &= ~(1 << offset);
writeb(gpioie, chip->base + GPIOIE);
spin_unlock_irqrestore(&chip->irq_lock, flags);
}
static void pl061_irq_enable(struct irq_data *d)
{
struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
int offset = d->irq - chip->irq_base;
unsigned long flags;
u8 gpioie;
spin_lock_irqsave(&chip->irq_lock, flags);
gpioie = readb(chip->base + GPIOIE);
gpioie |= 1 << offset;
writeb(gpioie, chip->base + GPIOIE);
spin_unlock_irqrestore(&chip->irq_lock, flags);
}
static int pl061_irq_type(struct irq_data *d, unsigned trigger)
{
struct pl061_gpio *chip = irq_data_get_irq_chip_data(d);
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct pl061_gpio *chip = gc->private;
int offset = d->irq - chip->irq_base;
unsigned long flags;
u8 gpiois, gpioibe, gpioiev;
@ -181,7 +144,7 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
if (offset < 0 || offset >= PL061_GPIO_NR)
return -EINVAL;
spin_lock_irqsave(&chip->irq_lock, flags);
raw_spin_lock_irqsave(&gc->lock, flags);
gpioiev = readb(chip->base + GPIOIEV);
@ -210,53 +173,54 @@ static int pl061_irq_type(struct irq_data *d, unsigned trigger)
writeb(gpioiev, chip->base + GPIOIEV);
spin_unlock_irqrestore(&chip->irq_lock, flags);
raw_spin_unlock_irqrestore(&gc->lock, flags);
return 0;
}
static struct irq_chip pl061_irqchip = {
.name = "GPIO",
.irq_enable = pl061_irq_enable,
.irq_disable = pl061_irq_disable,
.irq_set_type = pl061_irq_type,
};
static void pl061_irq_handler(unsigned irq, struct irq_desc *desc)
{
struct list_head *chip_list = irq_get_handler_data(irq);
struct list_head *ptr;
struct pl061_gpio *chip;
unsigned long pending;
int offset;
struct pl061_gpio *chip = irq_desc_get_handler_data(desc);
struct irq_chip *irqchip = irq_desc_get_chip(desc);
desc->irq_data.chip->irq_ack(&desc->irq_data);
list_for_each(ptr, chip_list) {
unsigned long pending;
int offset;
chip = list_entry(ptr, struct pl061_gpio, list);
pending = readb(chip->base + GPIOMIS);
writeb(pending, chip->base + GPIOIC);
if (pending == 0)
continue;
chained_irq_enter(irqchip, desc);
pending = readb(chip->base + GPIOMIS);
writeb(pending, chip->base + GPIOIC);
if (pending) {
for_each_set_bit(offset, &pending, PL061_GPIO_NR)
generic_handle_irq(pl061_to_irq(&chip->gc, offset));
}
desc->irq_data.chip->irq_unmask(&desc->irq_data);
chained_irq_exit(irqchip, desc);
}
static void __init pl061_init_gc(struct pl061_gpio *chip, int irq_base)
{
struct irq_chip_type *ct;
chip->irq_gc = irq_alloc_generic_chip("gpio-pl061", 1, irq_base,
chip->base, handle_simple_irq);
chip->irq_gc->private = chip;
ct = chip->irq_gc->chip_types;
ct->chip.irq_mask = irq_gc_mask_clr_bit;
ct->chip.irq_unmask = irq_gc_mask_set_bit;
ct->chip.irq_set_type = pl061_irq_type;
ct->chip.irq_set_wake = irq_gc_set_wake;
ct->regs.mask = GPIOIE;
irq_setup_generic_chip(chip->irq_gc, IRQ_MSK(PL061_GPIO_NR),
IRQ_GC_INIT_NESTED_LOCK, IRQ_NOREQUEST, 0);
}
static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
{
struct pl061_platform_data *pdata;
struct pl061_gpio *chip;
struct list_head *chip_list;
int ret, irq, i;
static DECLARE_BITMAP(init_irq, NR_IRQS);
pdata = dev->dev.platform_data;
if (pdata == NULL)
return -ENODEV;
chip = kzalloc(sizeof(*chip), GFP_KERNEL);
if (chip == NULL)
@ -268,7 +232,7 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
chip->irq_base = pdata->irq_base;
} else if (dev->dev.of_node) {
chip->gc.base = -1;
chip->irq_base = NO_IRQ;
chip->irq_base = 0;
} else {
ret = -ENODEV;
goto free_mem;
@ -287,8 +251,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
}
spin_lock_init(&chip->lock);
spin_lock_init(&chip->irq_lock);
INIT_LIST_HEAD(&chip->list);
chip->gc.direction_input = pl061_direction_input;
chip->gc.direction_output = pl061_direction_output;
@ -308,9 +270,11 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
* irq_chip support
*/
if (chip->irq_base == NO_IRQ)
if (chip->irq_base <= 0)
return 0;
pl061_init_gc(chip, chip->irq_base);
writeb(0, chip->base + GPIOIE); /* disable irqs */
irq = dev->irq[0];
if (irq < 0) {
@ -318,18 +282,7 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
goto iounmap;
}
irq_set_chained_handler(irq, pl061_irq_handler);
if (!test_and_set_bit(irq, init_irq)) { /* list initialized? */
chip_list = kmalloc(sizeof(*chip_list), GFP_KERNEL);
if (chip_list == NULL) {
clear_bit(irq, init_irq);
ret = -ENOMEM;
goto iounmap;
}
INIT_LIST_HEAD(chip_list);
irq_set_handler_data(irq, chip_list);
} else
chip_list = irq_get_handler_data(irq);
list_add(&chip->list, chip_list);
irq_set_handler_data(irq, chip);
for (i = 0; i < PL061_GPIO_NR; i++) {
if (pdata) {
@ -339,11 +292,6 @@ static int pl061_probe(struct amba_device *dev, const struct amba_id *id)
else
pl061_direction_input(&chip->gc, i);
}
irq_set_chip_and_handler(i + chip->irq_base, &pl061_irqchip,
handle_simple_irq);
set_irq_flags(i+chip->irq_base, IRQF_VALID);
irq_set_chip_data(i + chip->irq_base, chip);
}
amba_set_drvdata(dev, chip);


@ -62,6 +62,7 @@ static int i915_capabilities(struct seq_file *m, void *data)
const struct intel_device_info *info = INTEL_INFO(dev);
seq_printf(m, "gen: %d\n", info->gen);
seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev));
#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
B(is_mobile);
B(is_i85x);


@ -1454,6 +1454,14 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
diff1 = now - dev_priv->last_time1;
/* Prevent division-by-zero if we are asking too fast.
* Also, we don't get interesting results if we are polling
* faster than once in 10ms, so just return the saved value
* in such cases.
*/
if (diff1 <= 10)
return dev_priv->chipset_power;
count1 = I915_READ(DMIEC);
count2 = I915_READ(DDREC);
count3 = I915_READ(CSIEC);
@ -1484,6 +1492,8 @@ unsigned long i915_chipset_val(struct drm_i915_private *dev_priv)
dev_priv->last_count1 = total_count;
dev_priv->last_time1 = now;
dev_priv->chipset_power = ret;
return ret;
}


@ -58,15 +58,15 @@ module_param_named(powersave, i915_powersave, int, 0600);
MODULE_PARM_DESC(powersave,
"Enable powersavings, fbc, downclocking, etc. (default: true)");
unsigned int i915_semaphores __read_mostly = 0;
int i915_semaphores __read_mostly = -1;
module_param_named(semaphores, i915_semaphores, int, 0600);
MODULE_PARM_DESC(semaphores,
"Use semaphores for inter-ring sync (default: false)");
"Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))");
unsigned int i915_enable_rc6 __read_mostly = 0;
int i915_enable_rc6 __read_mostly = -1;
module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
MODULE_PARM_DESC(i915_enable_rc6,
"Enable power-saving render C-state 6 (default: true)");
"Enable power-saving render C-state 6 (default: -1 (use per-chip default)");
int i915_enable_fbc __read_mostly = -1;
module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
@ -328,7 +328,7 @@ void intel_detect_pch(struct drm_device *dev)
}
}
static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
int count;
@ -344,6 +344,22 @@ static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
udelay(10);
}
void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
int count;
count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1))
udelay(10);
I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1);
POSTING_READ(FORCEWAKE_MT);
count = 0;
while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0)
udelay(10);
}
/*
* Generally this is called implicitly by the register read function. However,
* if some sequence requires the GT to not power down then this function should
@ -356,15 +372,21 @@ void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
/* Forcewake is atomic in case we get in here without the lock */
if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
__gen6_gt_force_wake_get(dev_priv);
dev_priv->display.force_wake_get(dev_priv);
}
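/*
 * Editorial sketch (not part of the patch): the refcounted get/put pair
 * brackets any sequence of register reads that must not race with the GT
 * powering down; the per-chip callback installed in intel_init_display()
 * picks the plain or multi-threaded flavour.  The function name and the
 * registers read here are illustrative; the caller is assumed to hold
 * dev->struct_mutex, as the WARN_ON in gen6_gt_force_wake_put() expects.
 */
static u32 example_read_two_regs(struct drm_i915_private *dev_priv)
{
	u32 a, b;

	gen6_gt_force_wake_get(dev_priv);	/* first user wakes the GT */
	a = I915_READ(GEN6_RP_STATE_CAP);
	b = I915_READ(GT_FIFO_FREE_ENTRIES);
	gen6_gt_force_wake_put(dev_priv);	/* last user lets it sleep again */

	return a ^ b;
}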
static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE, 0);
POSTING_READ(FORCEWAKE);
}
void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
POSTING_READ(FORCEWAKE_MT);
}
/*
* see gen6_gt_force_wake_get()
*/
@ -373,7 +395,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
if (atomic_dec_and_test(&dev_priv->forcewake_count))
__gen6_gt_force_wake_put(dev_priv);
dev_priv->display.force_wake_put(dev_priv);
}
void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@ -903,8 +925,9 @@ MODULE_LICENSE("GPL and additional rights");
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
((reg) < 0x40000) && \
((reg) != FORCEWAKE) && \
((reg) != ECOBUS))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \


@ -107,6 +107,7 @@ struct opregion_header;
struct opregion_acpi;
struct opregion_swsci;
struct opregion_asle;
struct drm_i915_private;
struct intel_opregion {
struct opregion_header *header;
@ -221,6 +222,8 @@ struct drm_i915_display_funcs {
struct drm_i915_gem_object *obj);
int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb,
int x, int y);
void (*force_wake_get)(struct drm_i915_private *dev_priv);
void (*force_wake_put)(struct drm_i915_private *dev_priv);
/* clock updates for mode set */
/* cursor updates */
/* render clock increase/decrease */
@ -710,6 +713,7 @@ typedef struct drm_i915_private {
u64 last_count1;
unsigned long last_time1;
unsigned long chipset_power;
u64 last_count2;
struct timespec last_time2;
unsigned long gfx_power;
@ -998,11 +1002,11 @@ extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc __always_unused;
extern int i915_panel_ignore_lid __read_mostly;
extern unsigned int i915_powersave __read_mostly;
extern unsigned int i915_semaphores __read_mostly;
extern int i915_semaphores __read_mostly;
extern unsigned int i915_lvds_downclock __read_mostly;
extern int i915_panel_use_ssc __read_mostly;
extern int i915_vbt_sdvo_panel_type __read_mostly;
extern unsigned int i915_enable_rc6 __read_mostly;
extern int i915_enable_rc6 __read_mostly;
extern int i915_enable_fbc __read_mostly;
extern bool i915_enable_hangcheck __read_mostly;
@ -1308,6 +1312,11 @@ extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch(struct drm_device *dev);
extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
extern void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
extern void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv);
/* overlay */
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@ -1352,8 +1361,9 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
(((dev_priv)->info->gen >= 6) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
((reg) < 0x40000) && \
((reg) != FORCEWAKE) && \
((reg) != ECOBUS))
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);


@ -2026,13 +2026,8 @@ i915_wait_request(struct intel_ring_buffer *ring,
* to handle this, the waiter on a request often wants an associated
* buffer to have made it to the inactive list, and we would need
* a separate wait queue to handle that.
*
* To avoid a recursion with the ilk VT-d workaround (that calls
* gpu_idle when unbinding objects with interruptible==false) don't
* retire requests in that case (because it might call unbind if the
* active list holds the last reference to the object).
*/
if (ret == 0 && dev_priv->mm.interruptible)
if (ret == 0)
i915_gem_retire_requests_ring(ring);
return ret;


@ -32,6 +32,7 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include <linux/dma_remapping.h>
struct change_domains {
uint32_t invalidate_domains;
@ -746,6 +747,22 @@ i915_gem_execbuffer_flush(struct drm_device *dev,
return 0;
}
static bool
intel_enable_semaphores(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
return 0;
if (i915_semaphores >= 0)
return i915_semaphores;
/* Enable semaphores on SNB when IO remapping is off */
if (INTEL_INFO(dev)->gen == 6)
return !intel_iommu_enabled;
return 1;
}
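/*
 * Editorial note (not part of the patch): i915_semaphores now follows
 * the usual i915 tri-state module-parameter convention:
 *   i915.semaphores=1    force semaphores on
 *   i915.semaphores=0    force them off
 *   i915.semaphores=-1   (default) take the per-chip choice above, i.e.
 *                        on gen6 enable them only while the IOMMU is off
 */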
static int
i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *to)
@ -758,7 +775,7 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
return 0;
/* XXX gpu semaphores are implicated in various hard hangs on SNB */
if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores)
if (!intel_enable_semaphores(obj->base.dev))
return i915_gem_object_wait_rendering(obj);
idx = intel_ring_sync_index(from, to);


@ -3303,10 +3303,10 @@
/* or SDVOB */
#define HDMIB 0xe1140
#define PORT_ENABLE (1 << 31)
#define TRANSCODER_A (0)
#define TRANSCODER_B (1 << 30)
#define TRANSCODER(pipe) ((pipe) << 30)
#define TRANSCODER_MASK (1 << 30)
#define TRANSCODER(pipe) ((pipe) << 30)
#define TRANSCODER_CPT(pipe) ((pipe) << 29)
#define TRANSCODER_MASK (1 << 30)
#define TRANSCODER_MASK_CPT (3 << 29)
#define COLOR_FORMAT_8bpc (0)
#define COLOR_FORMAT_12bpc (3 << 26)
#define SDVOB_HOTPLUG_ENABLE (1 << 23)
@ -3447,8 +3447,30 @@
#define EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
/* IVB */
#define EDP_LINK_TRAIN_400MV_0DB_IVB (0x24 <<22)
#define EDP_LINK_TRAIN_400MV_3_5DB_IVB (0x2a <<22)
#define EDP_LINK_TRAIN_400MV_6DB_IVB (0x2f <<22)
#define EDP_LINK_TRAIN_600MV_0DB_IVB (0x30 <<22)
#define EDP_LINK_TRAIN_600MV_3_5DB_IVB (0x36 <<22)
#define EDP_LINK_TRAIN_800MV_0DB_IVB (0x38 <<22)
#define EDP_LINK_TRAIN_800MV_3_5DB_IVB (0x33 <<22)
/* legacy values */
#define EDP_LINK_TRAIN_500MV_0DB_IVB (0x00 <<22)
#define EDP_LINK_TRAIN_1000MV_0DB_IVB (0x20 <<22)
#define EDP_LINK_TRAIN_500MV_3_5DB_IVB (0x02 <<22)
#define EDP_LINK_TRAIN_1000MV_3_5DB_IVB (0x22 <<22)
#define EDP_LINK_TRAIN_1000MV_6DB_IVB (0x23 <<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_IVB (0x3f<<22)
#define FORCEWAKE 0xA18C
#define FORCEWAKE_ACK 0x130090
#define FORCEWAKE_MT 0xa188 /* multi-threaded */
#define FORCEWAKE_MT_ACK 0x130040
#define ECOBUS 0xa180
#define FORCEWAKE_MT_ENABLE (1<<5)
#define GT_FIFO_FREE_ENTRIES 0x120008
#define GT_FIFO_NUM_RESERVED_ENTRIES 20


@ -38,8 +38,8 @@
#include "i915_drv.h"
#include "i915_trace.h"
#include "drm_dp_helper.h"
#include "drm_crtc_helper.h"
#include <linux/dma_remapping.h>
#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
@ -4670,6 +4670,7 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
/**
* intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
* @crtc: CRTC structure
* @mode: requested mode
*
* A pipe may be connected to one or more outputs. Based on the depth of the
* attached framebuffer, choose a good color depth to use on the pipe.
@ -4681,13 +4682,15 @@ static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
* HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
* Displays may support a restricted set as well, check EDID and clamp as
* appropriate.
* DP may want to dither down to 6bpc to fit larger modes
*
* RETURNS:
* Dithering requirement (i.e. false if display bpc and pipe bpc match,
* true if they don't match).
*/
static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
unsigned int *pipe_bpp)
unsigned int *pipe_bpp,
struct drm_display_mode *mode)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@ -4758,6 +4761,11 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
}
}
if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
display_bpc = 6;
}
/*
* We could just drive the pipe at the highest bpc all the time and
* enable dithering as needed, but that costs bandwidth. So choose
@ -5019,6 +5027,16 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
pipeconf &= ~PIPECONF_DOUBLE_WIDE;
}
/* default to 8bpc */
pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
if (is_dp) {
if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
pipeconf |= PIPECONF_BPP_6 |
PIPECONF_DITHER_EN |
PIPECONF_DITHER_TYPE_SP;
}
}
dpll |= DPLL_VCO_ENABLE;
DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
@ -5480,7 +5498,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
/* determine panel color depth */
temp = I915_READ(PIPECONF(pipe));
temp &= ~PIPE_BPC_MASK;
dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp);
dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
switch (pipe_bpp) {
case 18:
temp |= PIPE_6BPC;
@ -7189,11 +7207,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
work->old_fb_obj = intel_fb->obj;
INIT_WORK(&work->work, intel_unpin_work_fn);
ret = drm_vblank_get(dev, intel_crtc->pipe);
if (ret)
goto free_work;
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (intel_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
drm_vblank_put(dev, intel_crtc->pipe);
DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
@ -7212,10 +7235,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
crtc->fb = fb;
ret = drm_vblank_get(dev, intel_crtc->pipe);
if (ret)
goto cleanup_objs;
work->pending_flip_obj = obj;
work->enable_stall_check = true;
@ -7238,7 +7257,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
cleanup_pending:
atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
cleanup_objs:
drm_gem_object_unreference(&work->old_fb_obj->base);
drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
@ -7247,6 +7265,8 @@ cleanup_objs:
intel_crtc->unpin_work = NULL;
spin_unlock_irqrestore(&dev->event_lock, flags);
drm_vblank_put(dev, intel_crtc->pipe);
free_work:
kfree(work);
return ret;
@ -7887,6 +7907,33 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
static bool intel_enable_rc6(struct drm_device *dev)
{
/*
* Respect the kernel parameter if it is set
*/
if (i915_enable_rc6 >= 0)
return i915_enable_rc6;
/*
* Disable RC6 on Ironlake
*/
if (INTEL_INFO(dev)->gen == 5)
return 0;
/*
* Enable rc6 on Sandybridge if DMA remapping is disabled
*/
if (INTEL_INFO(dev)->gen == 6) {
DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n",
intel_iommu_enabled ? "true" : "false",
!intel_iommu_enabled ? "en" : "dis");
return !intel_iommu_enabled;
}
DRM_DEBUG_DRIVER("RC6 enabled\n");
return 1;
}
void gen6_enable_rps(struct drm_i915_private *dev_priv)
{
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
@ -7923,7 +7970,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv)
I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
if (i915_enable_rc6)
if (intel_enable_rc6(dev_priv->dev))
rc6_mask = GEN6_RC_CTL_RC6p_ENABLE |
GEN6_RC_CTL_RC6_ENABLE;
@ -8372,7 +8419,7 @@ void ironlake_enable_rc6(struct drm_device *dev)
/* rc6 disabled by default due to repeated reports of hanging during
* boot and resume.
*/
if (!i915_enable_rc6)
if (!intel_enable_rc6(dev))
return;
mutex_lock(&dev->struct_mutex);
@ -8491,6 +8538,28 @@ static void intel_init_display(struct drm_device *dev)
/* For FIFO watermark updates */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.force_wake_get = __gen6_gt_force_wake_get;
dev_priv->display.force_wake_put = __gen6_gt_force_wake_put;
/* IVB configs may use multi-threaded forcewake */
if (IS_IVYBRIDGE(dev)) {
u32 ecobus;
mutex_lock(&dev->struct_mutex);
__gen6_gt_force_wake_mt_get(dev_priv);
ecobus = I915_READ(ECOBUS);
__gen6_gt_force_wake_mt_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
if (ecobus & FORCEWAKE_MT_ENABLE) {
DRM_DEBUG_KMS("Using MT version of forcewake\n");
dev_priv->display.force_wake_get =
__gen6_gt_force_wake_mt_get;
dev_priv->display.force_wake_put =
__gen6_gt_force_wake_mt_put;
}
}
if (HAS_PCH_IBX(dev))
dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating;
else if (HAS_PCH_CPT(dev))


@ -208,13 +208,15 @@ intel_dp_link_clock(uint8_t link_bw)
*/
static int
intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock)
intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock, int check_bpp)
{
struct drm_crtc *crtc = intel_dp->base.base.crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int bpp = 24;
if (intel_crtc)
if (check_bpp)
bpp = check_bpp;
else if (intel_crtc)
bpp = intel_crtc->bpp;
return (pixel_clock * bpp + 9) / 10;
@ -233,6 +235,7 @@ intel_dp_mode_valid(struct drm_connector *connector,
struct intel_dp *intel_dp = intel_attached_dp(connector);
int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
int max_lanes = intel_dp_max_lane_count(intel_dp);
int max_rate, mode_rate;
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
@ -242,9 +245,17 @@ intel_dp_mode_valid(struct drm_connector *connector,
return MODE_PANEL;
}
if (intel_dp_link_required(intel_dp, mode->clock)
> intel_dp_max_data_rate(max_link_clock, max_lanes))
return MODE_CLOCK_HIGH;
mode_rate = intel_dp_link_required(intel_dp, mode->clock, 0);
max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
if (mode_rate > max_rate) {
mode_rate = intel_dp_link_required(intel_dp,
mode->clock, 18);
if (mode_rate > max_rate)
return MODE_CLOCK_HIGH;
else
mode->private_flags |= INTEL_MODE_DP_FORCE_6BPC;
}
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
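/*
 * Worked example (editorial sketch, not part of the patch), in the same
 * units the code uses and assuming intel_dp_max_data_rate() scales the
 * link clock by lane count and the 8b/10b factor; the panel and link
 * numbers are illustrative:
 *   panel: 2560x1600@60, pixel clock ~268500 kHz
 *   link:  4 lanes at 1.62 GHz
 *
 *   max_rate           = 162000 * 4 * 8 / 10    = 518400
 *   mode_rate @ 24 bpp = (268500 * 24 + 9) / 10 = 644400  -> exceeds max_rate
 *   mode_rate @ 18 bpp = (268500 * 18 + 9) / 10 = 483300  -> fits
 *
 * so the mode is accepted with INTEL_MODE_DP_FORCE_6BPC set and the pipe
 * dithers down to 6 bpc instead of rejecting the mode outright.
 */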
@ -362,8 +373,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
* clock divider.
*/
if (is_cpu_edp(intel_dp)) {
if (IS_GEN6(dev))
aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */
if (IS_GEN6(dev) || IS_GEN7(dev))
aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
else
aux_clock_divider = 225; /* eDP input clock at 450Mhz */
} else if (HAS_PCH_SPLIT(dev))
@ -672,6 +683,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
int lane_count, clock;
int max_lane_count = intel_dp_max_lane_count(intel_dp);
int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
int bpp = mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 0;
static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
@ -689,7 +701,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
if (intel_dp_link_required(intel_dp, mode->clock)
if (intel_dp_link_required(intel_dp, mode->clock, bpp)
<= link_avail) {
intel_dp->link_bw = bws[clock];
intel_dp->lane_count = lane_count;
@ -817,10 +829,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
/*
* There are three kinds of DP registers:
* There are four kinds of DP registers:
*
* IBX PCH
* CPU
* SNB CPU
* IVB CPU
* CPT PCH
*
* IBX PCH and CPU are the same for almost everything,
@ -873,7 +886,25 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* Split out the IBX/CPU vs CPT settings */
if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
intel_dp->DP |= DP_SYNC_HS_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
intel_dp->DP |= DP_SYNC_VS_HIGH;
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
intel_dp->DP |= DP_ENHANCED_FRAMING;
intel_dp->DP |= intel_crtc->pipe << 29;
/* don't miss out required setting for eDP */
intel_dp->DP |= DP_PLL_ENABLE;
if (adjusted_mode->clock < 200000)
intel_dp->DP |= DP_PLL_FREQ_160MHZ;
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@ -1375,34 +1406,59 @@ static char *link_train_names[] = {
* These are source-specific values; current Intel hardware supports
* a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
*/
#define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800
#define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200
static uint8_t
intel_dp_pre_emphasis_max(uint8_t voltage_swing)
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
struct drm_device *dev = intel_dp->base.base.dev;
if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
return DP_TRAIN_VOLTAGE_SWING_800;
else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
return DP_TRAIN_VOLTAGE_SWING_1200;
else
return DP_TRAIN_VOLTAGE_SWING_800;
}
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
struct drm_device *dev = intel_dp->base.base.dev;
if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
} else {
switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_600:
return DP_TRAIN_PRE_EMPHASIS_6;
case DP_TRAIN_VOLTAGE_SWING_800:
return DP_TRAIN_PRE_EMPHASIS_3_5;
case DP_TRAIN_VOLTAGE_SWING_1200:
default:
return DP_TRAIN_PRE_EMPHASIS_0;
}
}
}
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
struct drm_device *dev = intel_dp->base.base.dev;
uint8_t v = 0;
uint8_t p = 0;
int lane;
uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
int voltage_max;
uint8_t voltage_max;
uint8_t preemph_max;
for (lane = 0; lane < intel_dp->lane_count; lane++) {
uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
@ -1414,15 +1470,13 @@ intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_ST
p = this_p;
}
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
voltage_max = I830_DP_VOLTAGE_MAX_CPT;
else
voltage_max = I830_DP_VOLTAGE_MAX;
voltage_max = intel_dp_voltage_max(intel_dp);
if (v >= voltage_max)
v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
if (p >= intel_dp_pre_emphasis_max(v))
p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
if (p >= preemph_max)
p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
@ -1494,6 +1548,37 @@ intel_gen6_edp_signal_levels(uint8_t train_set)
}
}
/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
DP_TRAIN_PRE_EMPHASIS_MASK);
switch (signal_levels) {
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_400MV_0DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
return EDP_LINK_TRAIN_400MV_6DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_600MV_0DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
return EDP_LINK_TRAIN_800MV_0DB_IVB;
case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
default:
DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
"0x%x\n", signal_levels);
return EDP_LINK_TRAIN_500MV_0DB_IVB;
}
}
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
int lane)
@ -1599,7 +1684,8 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP &= ~DP_LINK_TRAIN_MASK_CPT;
else
DP &= ~DP_LINK_TRAIN_MASK;
@ -1613,7 +1699,11 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
uint8_t link_status[DP_LINK_STATUS_SIZE];
uint32_t signal_levels;
if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
@ -1622,7 +1712,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_1_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_1;
@ -1703,7 +1793,10 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
break;
}
if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
@ -1711,7 +1804,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_PAT_2_CPT;
else
reg = DP | DP_LINK_TRAIN_PAT_2;
@ -1752,7 +1845,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
++tries;
}
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
reg = DP | DP_LINK_TRAIN_OFF;
@ -1782,7 +1875,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
udelay(100);
}
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) {
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
DP &= ~DP_LINK_TRAIN_MASK_CPT;
I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
} else {
@ -1794,7 +1887,7 @@ intel_dp_link_down(struct intel_dp *intel_dp)
msleep(17);
if (is_edp(intel_dp)) {
if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
DP |= DP_LINK_TRAIN_OFF_CPT;
else
DP |= DP_LINK_TRAIN_OFF;


@ -110,6 +110,7 @@
/* drm_display_mode->private_flags */
#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
#define INTEL_MODE_DP_FORCE_6BPC (0x10)
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,


@ -715,6 +715,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
},
},
{
.callback = intel_no_lvds_dmi_callback,
.ident = "Asus AT5NM10T-I",
.matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
DMI_MATCH(DMI_BOARD_NAME, "AT5NM10T-I"),
},
},
{ } /* terminating entry */
};


@ -178,13 +178,10 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
max >>= 16;
} else {
if (IS_PINEVIEW(dev)) {
if (INTEL_INFO(dev)->gen < 4)
max >>= 17;
} else {
else
max >>= 16;
if (INTEL_INFO(dev)->gen < 4)
max &= ~1;
}
if (is_backlight_combination_mode(dev))
max *= 0xff;
@ -203,13 +200,12 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (IS_PINEVIEW(dev))
if (INTEL_INFO(dev)->gen < 4)
val >>= 1;
if (is_backlight_combination_mode(dev)) {
u8 lbpc;
val &= ~1;
pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
val *= lbpc;
}
@ -246,11 +242,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level
}
tmp = I915_READ(BLC_PWM_CTL);
if (IS_PINEVIEW(dev)) {
tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
if (INTEL_INFO(dev)->gen < 4)
level <<= 1;
} else
tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
I915_WRITE(BLC_PWM_CTL, tmp | level);
}


@ -50,6 +50,7 @@
#define IS_TMDS(c) (c->output_flag & SDVO_TMDS_MASK)
#define IS_LVDS(c) (c->output_flag & SDVO_LVDS_MASK)
#define IS_TV_OR_LVDS(c) (c->output_flag & (SDVO_TV_MASK | SDVO_LVDS_MASK))
#define IS_DIGITAL(c) (c->output_flag & (SDVO_TMDS_MASK | SDVO_LVDS_MASK))
static const char *tv_format_names[] = {
@ -1086,8 +1087,12 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
}
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
if (INTEL_PCH_TYPE(dev) >= PCH_CPT)
sdvox |= TRANSCODER_CPT(intel_crtc->pipe);
else
sdvox |= TRANSCODER(intel_crtc->pipe);
if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
@ -1314,6 +1319,18 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
return status;
}
static bool
intel_sdvo_connector_matches_edid(struct intel_sdvo_connector *sdvo,
struct edid *edid)
{
bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
bool connector_is_digital = !!IS_DIGITAL(sdvo);
DRM_DEBUG_KMS("connector_is_digital? %d, monitor_is_digital? %d\n",
connector_is_digital, monitor_is_digital);
return connector_is_digital == monitor_is_digital;
}
static enum drm_connector_status
intel_sdvo_detect(struct drm_connector *connector, bool force)
{
@ -1358,10 +1375,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (edid == NULL)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
if (edid->input & DRM_EDID_INPUT_DIGITAL)
ret = connector_status_disconnected;
else
if (intel_sdvo_connector_matches_edid(intel_sdvo_connector,
edid))
ret = connector_status_connected;
else
ret = connector_status_disconnected;
connector->display_info.raw_edid = NULL;
kfree(edid);
} else
@ -1402,11 +1421,8 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
bool monitor_is_digital = !!(edid->input & DRM_EDID_INPUT_DIGITAL);
bool connector_is_digital = !!IS_TMDS(intel_sdvo_connector);
if (connector_is_digital == monitor_is_digital) {
if (intel_sdvo_connector_matches_edid(to_intel_sdvo_connector(connector),
edid)) {
drm_mode_connector_update_edid_property(connector, edid);
drm_add_edid_modes(connector, edid);
}


@ -390,6 +390,11 @@ extern int vmw_context_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int id,
struct vmw_resource **p_res);
extern int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_dma_buffer **out_buf);
extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_init(struct vmw_private *dev_priv,
struct vmw_surface *srf,


@ -33,6 +33,7 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
uint32_t fifo_min, hwversion;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
@ -41,7 +42,12 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
hwversion = ioread32(fifo_mem +
((fifo->capabilities &
SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
SVGA_FIFO_3D_HWVERSION_REVISED :
SVGA_FIFO_3D_HWVERSION));
if (hwversion == 0)
return false;


@ -58,8 +58,14 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
param->value = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);
param->value =
ioread32(fifo_mem +
((fifo->capabilities &
SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
SVGA_FIFO_3D_HWVERSION_REVISED :
SVGA_FIFO_3D_HWVERSION));
break;
}
default:
@ -166,13 +172,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
ret = -EINVAL;
goto out_no_fb;
}
vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
if (!vfb->dmabuf) {
DRM_ERROR("Framebuffer not dmabuf backed.\n");
ret = -EINVAL;
goto out_no_fb;
}
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))


@ -31,6 +31,44 @@
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
struct vmw_clip_rect {
int x1, x2, y1, y2;
};
/**
* Clip @num_rects number of @rects against @clip storing the
* results in @out_rects and the number of passed rects in @out_num.
*/
void vmw_clip_cliprects(struct drm_clip_rect *rects,
int num_rects,
struct vmw_clip_rect clip,
SVGASignedRect *out_rects,
int *out_num)
{
int i, k;
for (i = 0, k = 0; i < num_rects; i++) {
int x1 = max_t(int, clip.x1, rects[i].x1);
int y1 = max_t(int, clip.y1, rects[i].y1);
int x2 = min_t(int, clip.x2, rects[i].x2);
int y2 = min_t(int, clip.y2, rects[i].y2);
if (x1 >= x2)
continue;
if (y1 >= y2)
continue;
out_rects[k].left = x1;
out_rects[k].top = y1;
out_rects[k].right = x2;
out_rects[k].bottom = y2;
k++;
}
*out_num = k;
}
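As a reference for how that helper behaves, here is a minimal stand-alone sketch of the same intersect-and-drop loop; the struct, the function name and the sample rects are stand-ins for illustration and are not part of the patch.

#include <stdio.h>

/* stand-in for drm_clip_rect / SVGASignedRect, illustration only */
struct rect { int x1, y1, x2, y2; };

/* same intersect-and-drop-empty logic as vmw_clip_cliprects() above */
static int clip_rects(const struct rect *in, int n, struct rect clip,
		      struct rect *out)
{
	int i, k = 0;

	for (i = 0; i < n; i++) {
		struct rect r = {
			.x1 = in[i].x1 > clip.x1 ? in[i].x1 : clip.x1,
			.y1 = in[i].y1 > clip.y1 ? in[i].y1 : clip.y1,
			.x2 = in[i].x2 < clip.x2 ? in[i].x2 : clip.x2,
			.y2 = in[i].y2 < clip.y2 ? in[i].y2 : clip.y2,
		};

		if (r.x1 >= r.x2 || r.y1 >= r.y2)
			continue;	/* no overlap, drop the rect */
		out[k++] = r;
	}
	return k;	/* number of rects that survived clipping */
}

int main(void)
{
	struct rect clip = { 0, 0, 640, 480 };
	struct rect in[] = { { -10, -10, 100, 100 }, { 700, 0, 800, 50 } };
	struct rect out[2];
	int i, n = clip_rects(in, 2, clip, out);

	/* prints only the clipped first rect: 0,0 -> 100,100 */
	for (i = 0; i < n; i++)
		printf("%d,%d -> %d,%d\n", out[i].x1, out[i].y1,
		       out[i].x2, out[i].y2);
	return 0;
}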
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
if (du->cursor_surface)
@ -82,6 +120,43 @@ int vmw_cursor_update_image(struct vmw_private *dev_priv,
return 0;
}
int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
u32 width, u32 height,
u32 hotspotX, u32 hotspotY)
{
struct ttm_bo_kmap_obj map;
unsigned long kmap_offset;
unsigned long kmap_num;
void *virtual;
bool dummy;
int ret;
kmap_offset = 0;
kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;
ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return -EINVAL;
}
ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
if (unlikely(ret != 0))
goto err_unreserve;
virtual = ttm_kmap_obj_virtual(&map, &dummy);
ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
hotspotX, hotspotY);
ttm_bo_kunmap(&map);
err_unreserve:
ttm_bo_unreserve(&dmabuf->base);
return ret;
}
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y)
{
@ -110,24 +185,21 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
return -EINVAL;
if (handle) {
ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
handle, &surface);
if (!ret) {
if (!surface->snooper.image) {
DRM_ERROR("surface not suitable for cursor\n");
vmw_surface_unreference(&surface);
return -EINVAL;
}
} else {
ret = vmw_user_dmabuf_lookup(tfile,
handle, &dmabuf);
if (ret) {
DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
return -EINVAL;
}
ret = vmw_user_lookup_handle(dev_priv, tfile,
handle, &surface, &dmabuf);
if (ret) {
DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
return -EINVAL;
}
}
/* need to do this before taking down old image */
if (surface && !surface->snooper.image) {
DRM_ERROR("surface not suitable for cursor\n");
vmw_surface_unreference(&surface);
return -EINVAL;
}
/* takedown old cursor */
if (du->cursor_surface) {
du->cursor_surface->snooper.crtc = NULL;
@ -146,36 +218,11 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
vmw_cursor_update_image(dev_priv, surface->snooper.image,
64, 64, du->hotspot_x, du->hotspot_y);
} else if (dmabuf) {
struct ttm_bo_kmap_obj map;
unsigned long kmap_offset;
unsigned long kmap_num;
void *virtual;
bool dummy;
/* vmw_user_surface_lookup takes one reference */
du->cursor_dmabuf = dmabuf;
kmap_offset = 0;
kmap_num = (64*64*4) >> PAGE_SHIFT;
ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
if (unlikely(ret != 0)) {
DRM_ERROR("reserve failed\n");
return -EINVAL;
}
ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
if (unlikely(ret != 0))
goto err_unreserve;
virtual = ttm_kmap_obj_virtual(&map, &dummy);
vmw_cursor_update_image(dev_priv, virtual, 64, 64,
du->hotspot_x, du->hotspot_y);
ttm_bo_kunmap(&map);
err_unreserve:
ttm_bo_unreserve(&dmabuf->base);
ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
du->hotspot_x, du->hotspot_y);
} else {
vmw_cursor_update_position(dev_priv, false, 0, 0);
return 0;
@ -377,8 +424,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
struct drm_clip_rect *clips,
unsigned num_clips, int inc)
{
struct drm_clip_rect *clips_ptr;
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_clip_rect *clips_ptr;
struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, num_units;
@ -391,7 +439,6 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
} *cmd;
SVGASignedRect *blits;
num_units = 0;
list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
head) {
@ -402,13 +449,24 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
BUG_ON(!clips || !num_clips);
tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
if (unlikely(tmp == NULL)) {
DRM_ERROR("Temporary cliprect memory alloc failed.\n");
return -ENOMEM;
}
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kzalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Temporary fifo memory alloc failed.\n");
return -ENOMEM;
ret = -ENOMEM;
goto out_free_tmp;
}
/* setup blits pointer */
blits = (SVGASignedRect *)&cmd[1];
/* initial clip region */
left = clips->x1;
right = clips->x2;
top = clips->y1;
@ -434,45 +492,60 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
cmd->body.srcRect.bottom = bottom;
clips_ptr = clips;
blits = (SVGASignedRect *)&cmd[1];
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
blits[i].left = clips_ptr->x1 - left;
blits[i].right = clips_ptr->x2 - left;
blits[i].top = clips_ptr->y1 - top;
blits[i].bottom = clips_ptr->y2 - top;
tmp[i].x1 = clips_ptr->x1 - left;
tmp[i].x2 = clips_ptr->x2 - left;
tmp[i].y1 = clips_ptr->y1 - top;
tmp[i].y2 = clips_ptr->y2 - top;
}
/* do per unit writing, reuse fifo for each */
for (i = 0; i < num_units; i++) {
struct vmw_display_unit *unit = units[i];
int clip_x1 = left - unit->crtc.x;
int clip_y1 = top - unit->crtc.y;
int clip_x2 = right - unit->crtc.x;
int clip_y2 = bottom - unit->crtc.y;
struct vmw_clip_rect clip;
int num;
clip.x1 = left - unit->crtc.x;
clip.y1 = top - unit->crtc.y;
clip.x2 = right - unit->crtc.x;
clip.y2 = bottom - unit->crtc.y;
/* skip any crtcs that miss the clip region */
if (clip_x1 >= unit->crtc.mode.hdisplay ||
clip_y1 >= unit->crtc.mode.vdisplay ||
clip_x2 <= 0 || clip_y2 <= 0)
if (clip.x1 >= unit->crtc.mode.hdisplay ||
clip.y1 >= unit->crtc.mode.vdisplay ||
clip.x2 <= 0 || clip.y2 <= 0)
continue;
/*
* In order for the clip rects to be correctly scaled
* the src and dest rects need to be the same size.
*/
cmd->body.destRect.left = clip.x1;
cmd->body.destRect.right = clip.x2;
cmd->body.destRect.top = clip.y1;
cmd->body.destRect.bottom = clip.y2;
/* create a clip rect of the crtc in dest coords */
clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
clip.x1 = 0 - clip.x1;
clip.y1 = 0 - clip.y1;
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
cmd->body.destScreenId = unit->unit;
/*
* The blit command is a lot more resilient than the
* readback command when it comes to clip rects, so it is
* okay to go out of bounds.
*/
/* clip and write blits to cmd stream */
vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
cmd->body.destRect.left = clip_x1;
cmd->body.destRect.right = clip_x2;
cmd->body.destRect.top = clip_y1;
cmd->body.destRect.bottom = clip_y2;
/* if no cliprects hit skip this */
if (num == 0)
continue;
/* recalculate package length */
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL);
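A worked example of that coordinate shuffle (the numbers are invented for illustration): with a dirty bounding box of left/top (100, 50) and right/bottom (300, 200), and a unit whose crtc sits at (240, 0) in a 1024x768 mode, destRect becomes (-140, 50)-(60, 200) in that unit's space. The crtc clip rect handed to vmw_clip_cliprects() is then (140, -50)-(1164, 718) in box-relative coordinates, so only the portion of each tmp rect that actually lands on this crtc survives into the blit list.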
@ -480,7 +553,10 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
break;
}
kfree(cmd);
out_free_tmp:
kfree(tmp);
return ret;
}
@ -556,6 +632,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
* Sanity checks.
*/
/* Surface must be marked as a scanout. */
if (unlikely(!surface->scanout))
return -EINVAL;
if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
surface->sizes[0].width < mode_cmd->width ||
@ -782,6 +862,7 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
int clip_y1 = clips_ptr->y1 - unit->crtc.y;
int clip_x2 = clips_ptr->x2 - unit->crtc.x;
int clip_y2 = clips_ptr->y2 - unit->crtc.y;
int move_x, move_y;
/* skip any crtcs that miss the clip region */
if (clip_x1 >= unit->crtc.mode.hdisplay ||
@ -789,12 +870,21 @@ static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
clip_x2 <= 0 || clip_y2 <= 0)
continue;
/* clip size to crtc size */
clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);
/* translate both src and dest to bring clip into screen */
move_x = min_t(int, clip_x1, 0);
move_y = min_t(int, clip_y1, 0);
/* actual translate done here */
blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
blits[hit_num].body.destScreenId = unit->unit;
blits[hit_num].body.srcOrigin.x = clips_ptr->x1;
blits[hit_num].body.srcOrigin.y = clips_ptr->y1;
blits[hit_num].body.destRect.left = clip_x1;
blits[hit_num].body.destRect.top = clip_y1;
blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
blits[hit_num].body.destRect.left = clip_x1 - move_x;
blits[hit_num].body.destRect.top = clip_y1 - move_y;
blits[hit_num].body.destRect.right = clip_x2;
blits[hit_num].body.destRect.bottom = clip_y2;
hit_num++;
@ -1033,46 +1123,29 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
}
/**
* End conditioned code.
*/
ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
mode_cmd->handle, &surface);
/* returns either a dmabuf or surface */
ret = vmw_user_lookup_handle(dev_priv, tfile,
mode_cmd->handle,
&surface, &bo);
if (ret)
goto try_dmabuf;
goto err_out;
if (!surface->scanout)
goto err_not_scanout;
/* Create the new framebuffer depending one what we got back */
if (bo)
ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
mode_cmd);
else if (surface)
ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
surface, &vfb, mode_cmd);
else
BUG();
ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
&vfb, mode_cmd);
/* vmw_user_surface_lookup takes one ref so does new_fb */
vmw_surface_unreference(&surface);
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
ttm_base_object_unref(&user_obj);
return ERR_PTR(ret);
} else
vfb->user_obj = user_obj;
return &vfb->base;
try_dmabuf:
DRM_INFO("%s: trying buffer\n", __func__);
ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
if (ret) {
DRM_ERROR("failed to find buffer: %i\n", ret);
return ERR_PTR(-ENOENT);
}
ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
mode_cmd);
/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
vmw_dmabuf_unreference(&bo);
err_out:
/* vmw_user_lookup_handle takes one ref so does new_fb */
if (bo)
vmw_dmabuf_unreference(&bo);
if (surface)
vmw_surface_unreference(&surface);
if (ret) {
DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
@ -1082,14 +1155,6 @@ try_dmabuf:
vfb->user_obj = user_obj;
return &vfb->base;
err_not_scanout:
DRM_ERROR("surface not marked as scanout\n");
/* vmw_user_surface_lookup takes one ref */
vmw_surface_unreference(&surface);
ttm_base_object_unref(&user_obj);
return ERR_PTR(-EINVAL);
}
static struct drm_mode_config_funcs vmw_kms_funcs = {
@ -1106,10 +1171,12 @@ int vmw_kms_present(struct vmw_private *dev_priv,
uint32_t num_clips)
{
struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
struct drm_clip_rect *tmp;
struct drm_crtc *crtc;
size_t fifo_size;
int i, k, num_units;
int ret = 0; /* silence warning */
int left, right, top, bottom;
struct {
SVGA3dCmdHeader header;
@ -1127,60 +1194,95 @@ int vmw_kms_present(struct vmw_private *dev_priv,
BUG_ON(surface == NULL);
BUG_ON(!clips || !num_clips);
tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
if (unlikely(tmp == NULL)) {
DRM_ERROR("Temporary cliprect memory alloc failed.\n");
return -ENOMEM;
}
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
cmd = kmalloc(fifo_size, GFP_KERNEL);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed to allocate temporary fifo memory.\n");
return -ENOMEM;
ret = -ENOMEM;
goto out_free_tmp;
}
left = clips->x;
right = clips->x + clips->w;
top = clips->y;
bottom = clips->y + clips->h;
for (i = 1; i < num_clips; i++) {
left = min_t(int, left, (int)clips[i].x);
right = max_t(int, right, (int)clips[i].x + clips[i].w);
top = min_t(int, top, (int)clips[i].y);
bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
}
/* only need to do this once */
memset(cmd, 0, fifo_size);
cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
cmd->body.srcRect.left = 0;
cmd->body.srcRect.right = surface->sizes[0].width;
cmd->body.srcRect.top = 0;
cmd->body.srcRect.bottom = surface->sizes[0].height;
blits = (SVGASignedRect *)&cmd[1];
cmd->body.srcRect.left = left;
cmd->body.srcRect.right = right;
cmd->body.srcRect.top = top;
cmd->body.srcRect.bottom = bottom;
for (i = 0; i < num_clips; i++) {
blits[i].left = clips[i].x;
blits[i].right = clips[i].x + clips[i].w;
blits[i].top = clips[i].y;
blits[i].bottom = clips[i].y + clips[i].h;
tmp[i].x1 = clips[i].x - left;
tmp[i].x2 = clips[i].x + clips[i].w - left;
tmp[i].y1 = clips[i].y - top;
tmp[i].y2 = clips[i].y + clips[i].h - top;
}
for (k = 0; k < num_units; k++) {
struct vmw_display_unit *unit = units[k];
int clip_x1 = destX - unit->crtc.x;
int clip_y1 = destY - unit->crtc.y;
int clip_x2 = clip_x1 + surface->sizes[0].width;
int clip_y2 = clip_y1 + surface->sizes[0].height;
struct vmw_clip_rect clip;
int num;
clip.x1 = left + destX - unit->crtc.x;
clip.y1 = top + destY - unit->crtc.y;
clip.x2 = right + destX - unit->crtc.x;
clip.y2 = bottom + destY - unit->crtc.y;
/* skip any crtcs that miss the clip region */
if (clip_x1 >= unit->crtc.mode.hdisplay ||
clip_y1 >= unit->crtc.mode.vdisplay ||
clip_x2 <= 0 || clip_y2 <= 0)
if (clip.x1 >= unit->crtc.mode.hdisplay ||
clip.y1 >= unit->crtc.mode.vdisplay ||
clip.x2 <= 0 || clip.y2 <= 0)
continue;
/*
* In order for the clip rects to be correctly scaled
* the src and dest rects need to be the same size.
*/
cmd->body.destRect.left = clip.x1;
cmd->body.destRect.right = clip.x2;
cmd->body.destRect.top = clip.y1;
cmd->body.destRect.bottom = clip.y2;
/* create a clip rect of the crtc in dest coords */
clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
clip.x1 = 0 - clip.x1;
clip.y1 = 0 - clip.y1;
/* need to reset sid as it is changed by execbuf */
cmd->body.srcImage.sid = sid;
cmd->body.destScreenId = unit->unit;
/*
* The blit command is a lot more resilient than the
* readback command when it comes to clip rects, so it is
* okay to go out of bounds.
*/
/* clip and write blits to cmd stream */
vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);
cmd->body.destRect.left = clip_x1;
cmd->body.destRect.right = clip_x2;
cmd->body.destRect.top = clip_y1;
cmd->body.destRect.bottom = clip_y2;
/* if no cliprects hit skip this */
if (num == 0)
continue;
/* recalculate package length */
fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
fifo_size, 0, NULL);
@ -1189,6 +1291,8 @@ int vmw_kms_present(struct vmw_private *dev_priv,
}
kfree(cmd);
out_free_tmp:
kfree(tmp);
return ret;
}


@ -62,9 +62,14 @@ struct vmw_framebuffer {
int vmw_cursor_update_image(struct vmw_private *dev_priv,
u32 *image, u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
u32 width, u32 height,
u32 hotspotX, u32 hotspotY);
void vmw_cursor_update_position(struct vmw_private *dev_priv,
bool show, int x, int y);
/**
* Base class display unit.
*


@ -74,9 +74,10 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
{
struct vmw_legacy_display *lds = dev_priv->ldu_priv;
struct vmw_legacy_display_unit *entry;
struct vmw_display_unit *du = NULL;
struct drm_framebuffer *fb = NULL;
struct drm_crtc *crtc = NULL;
int i = 0;
int i = 0, ret;
/* If there is no display topology the host just assumes
* that the guest will set the same layout as the host.
@ -129,6 +130,25 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
lds->last_num_active = lds->num_active;
/* Find the first du with a cursor. */
list_for_each_entry(entry, &lds->active, active) {
du = &entry->base;
if (!du->cursor_dmabuf)
continue;
ret = vmw_cursor_update_dmabuf(dev_priv,
du->cursor_dmabuf,
64, 64,
du->hotspot_x,
du->hotspot_y);
if (ret == 0)
break;
DRM_ERROR("Could not update cursor image\n");
}
return 0;
}


@ -1190,6 +1190,29 @@ void vmw_resource_unreserve(struct list_head *list)
write_unlock(lock);
}
/**
* Helper function that looks up either a surface or a dmabuf.
*
* The pointers pointed to by out_surf and out_buf must be NULL.
*/
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle,
struct vmw_surface **out_surf,
struct vmw_dma_buffer **out_buf)
{
int ret;
BUG_ON(*out_surf || *out_buf);
ret = vmw_user_surface_lookup_handle(dev_priv, tfile, handle, out_surf);
if (!ret)
return 0;
ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
return ret;
}
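A small stand-alone sketch of the calling convention that comment implies; the vmw types and the lookup itself are replaced by stand-ins here, and only the NULL-initialised out pointers plus the branch on whichever one got filled in are the point.

#include <stdio.h>
#include <stddef.h>

struct surface { int id; };
struct dma_buffer { int id; };

/* stand-in for vmw_user_lookup_handle(): fills exactly one out pointer */
static int lookup_handle(unsigned int handle, struct surface **out_surf,
			 struct dma_buffer **out_buf)
{
	static struct surface s = { 1 };
	static struct dma_buffer b = { 2 };

	if (handle & 1)
		*out_surf = &s;	/* handle resolved to a surface */
	else
		*out_buf = &b;	/* handle resolved to a dma buffer */
	return 0;
}

int main(void)
{
	/* both out pointers must start out NULL, as the comment requires */
	struct surface *surf = NULL;
	struct dma_buffer *buf = NULL;

	if (lookup_handle(3, &surf, &buf))
		return 1;

	if (surf)
		printf("got a surface (id %d)\n", surf->id);
	else if (buf)
		printf("got a dma buffer (id %d)\n", buf->id);
	return 0;
}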
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,


@ -59,7 +59,7 @@ static ssize_t jz4740_hwmon_read_adcin(struct device *dev,
{
struct jz4740_hwmon *hwmon = dev_get_drvdata(dev);
struct completion *completion = &hwmon->read_completion;
unsigned long t;
long t;
unsigned long val;
int ret;
@ -203,7 +203,7 @@ static int __devexit jz4740_hwmon_remove(struct platform_device *pdev)
return 0;
}
struct platform_driver jz4740_hwmon_driver = {
static struct platform_driver jz4740_hwmon_driver = {
.probe = jz4740_hwmon_probe,
.remove = __devexit_p(jz4740_hwmon_remove),
.driver = {


@ -893,6 +893,13 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
/* Set the number of I2C channel instance */
adap_info->ch_num = id->driver_data;
ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
KBUILD_MODNAME, adap_info);
if (ret) {
pch_pci_err(pdev, "request_irq FAILED\n");
goto err_request_irq;
}
for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
adap_info->pch_i2c_suspended = false;
@ -910,28 +917,23 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
pch_adap->dev.parent = &pdev->dev;
pch_i2c_init(&adap_info->pch_data[i]);
ret = i2c_add_adapter(pch_adap);
if (ret) {
pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
goto err_i2c_add_adapter;
goto err_add_adapter;
}
pch_i2c_init(&adap_info->pch_data[i]);
}
ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
KBUILD_MODNAME, adap_info);
if (ret) {
pch_pci_err(pdev, "request_irq FAILED\n");
goto err_i2c_add_adapter;
}
pci_set_drvdata(pdev, adap_info);
pch_pci_dbg(pdev, "returns %d.\n", ret);
return 0;
err_i2c_add_adapter:
err_add_adapter:
for (j = 0; j < i; j++)
i2c_del_adapter(&adap_info->pch_data[j].pch_adapter);
free_irq(pdev->irq, adap_info);
err_request_irq:
pci_iounmap(pdev, base_addr);
err_pci_iomap:
pci_release_regions(pdev);


@ -1047,13 +1047,14 @@ omap_i2c_probe(struct platform_device *pdev)
* size. This is to ensure that we can handle the status on int
* call back latencies.
*/
if (dev->rev >= OMAP_I2C_REV_ON_3530_4430) {
dev->fifo_size = 0;
dev->fifo_size = (dev->fifo_size / 2);
if (dev->rev >= OMAP_I2C_REV_ON_3530_4430)
dev->b_hw = 0; /* Disable hardware fixes */
} else {
dev->fifo_size = (dev->fifo_size / 2);
else
dev->b_hw = 1; /* Enable hardware fixes */
}
/* calculate wakeup latency constraint for MPU */
if (dev->set_mpu_wkup_lat != NULL)
dev->latency = (1000000 * dev->fifo_size) /


@ -534,6 +534,7 @@ static int s3c24xx_i2c_doxfer(struct s3c24xx_i2c *i2c,
/* first, try busy waiting briefly */
do {
cpu_relax();
iicstat = readl(i2c->regs + S3C2410_IICSTAT);
} while ((iicstat & S3C2410_IICSTAT_START) && --spins);
@ -786,7 +787,7 @@ static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
#else
static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
{
return -EINVAL;
return 0;
}
static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)


@ -2513,6 +2513,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
req.private_data_len = sizeof(struct cma_hdr) +
conn_param->private_data_len;
if (req.private_data_len < conn_param->private_data_len)
return -EINVAL;
req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
if (!req.private_data)
return -ENOMEM;
@ -2562,6 +2565,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
memset(&req, 0, sizeof req);
offset = cma_user_data_offset(id_priv->id.ps);
req.private_data_len = offset + conn_param->private_data_len;
if (req.private_data_len < conn_param->private_data_len)
return -EINVAL;
private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
if (!private_data)
return -ENOMEM;


@ -1244,7 +1244,8 @@ err_reg:
err_counter:
for (; i; --i)
mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
if (ibdev->counters[i - 1] != -1)
mlx4_counter_free(ibdev->dev, ibdev->counters[i - 1]);
err_map:
iounmap(ibdev->uar_map);
@ -1275,7 +1276,8 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
}
iounmap(ibdev->uar_map);
for (p = 0; p < ibdev->num_ports; ++p)
mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
if (ibdev->counters[p] != -1)
mlx4_counter_free(ibdev->dev, ibdev->counters[p]);
mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
mlx4_CLOSE_PORT(dev, p);


@ -1285,7 +1285,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt,
strlcpy(rcd->comm, current->comm, sizeof(rcd->comm));
ctxt_fp(fp) = rcd;
qib_stats.sps_ctxts++;
dd->freectxts++;
dd->freectxts--;
ret = 0;
goto bail;
@ -1794,7 +1794,7 @@ static int qib_close(struct inode *in, struct file *fp)
if (dd->pageshadow)
unlock_expected_tids(rcd);
qib_stats.sps_ctxts--;
dd->freectxts--;
dd->freectxts++;
}
mutex_unlock(&qib_mutex);


@ -115,8 +115,8 @@ static void decode_mg(struct cma3000_accl_data *data, int *datax,
static irqreturn_t cma3000_thread_irq(int irq, void *dev_id)
{
struct cma3000_accl_data *data = dev_id;
int datax, datay, dataz;
u8 ctrl, mode, range, intr_status;
int datax, datay, dataz, intr_status;
u8 ctrl, mode, range;
intr_status = CMA3000_READ(data, CMA3000_INTSTATUS, "interrupt status");
if (intr_status < 0)


@ -24,6 +24,7 @@
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/input/mt.h>
#include <linux/serio.h>
@ -1220,6 +1221,16 @@ static int synaptics_reconnect(struct psmouse *psmouse)
do {
psmouse_reset(psmouse);
if (retry) {
/*
* On some boxes, right after resuming, the touchpad
* needs some time to finish initializing (I assume
* it needs time to calibrate) and start responding
* to Synaptics-specific queries, so let's wait a
* bit.
*/
ssleep(1);
}
error = synaptics_detect(psmouse, 0);
} while (error && ++retry < 3);


@ -1470,6 +1470,9 @@ static const struct wacom_features wacom_features_0xE3 =
static const struct wacom_features wacom_features_0xE6 =
{ "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255,
0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0xEC =
{ "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255,
0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
static const struct wacom_features wacom_features_0x47 =
{ "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023,
31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@ -1611,6 +1614,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0xE2) },
{ USB_DEVICE_WACOM(0xE3) },
{ USB_DEVICE_WACOM(0xE6) },
{ USB_DEVICE_WACOM(0xEC) },
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }


@ -405,6 +405,9 @@ int dmar_disabled = 0;
int dmar_disabled = 1;
#endif /*CONFIG_INTEL_IOMMU_DEFAULT_ON*/
int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
@ -3647,6 +3650,8 @@ int __init intel_iommu_init(void)
bus_register_notifier(&pci_bus_type, &device_nb);
intel_iommu_enabled = 1;
return 0;
}


@ -1393,9 +1393,6 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
atomic_read(&bitmap->behind_writes),
bitmap->mddev->bitmap_info.max_write_behind);
}
if (bitmap->mddev->degraded)
/* Never clear bits or update events_cleared when degraded */
success = 0;
while (sectors) {
sector_t blocks;
@ -1409,7 +1406,7 @@ void bitmap_endwrite(struct bitmap *bitmap, sector_t offset, unsigned long secto
return;
}
if (success &&
if (success && !bitmap->mddev->degraded &&
bitmap->events_cleared < bitmap->mddev->events) {
bitmap->events_cleared = bitmap->mddev->events;
bitmap->need_sync = 1;


@ -230,6 +230,7 @@ static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
return -EINVAL;
rdev->raid_disk = rdev->saved_raid_disk;
rdev->saved_raid_disk = -1;
newconf = linear_conf(mddev,mddev->raid_disks+1);


@ -7360,8 +7360,7 @@ static int remove_and_add_spares(struct mddev *mddev)
spares++;
md_new_event(mddev);
set_bit(MD_CHANGE_DEVS, &mddev->flags);
} else
break;
}
}
}
}


@ -3065,11 +3065,17 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
}
} else if (test_bit(In_sync, &rdev->flags))
set_bit(R5_Insync, &dev->flags);
else {
else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
/* in sync if before recovery_offset */
if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset)
set_bit(R5_Insync, &dev->flags);
}
set_bit(R5_Insync, &dev->flags);
else if (test_bit(R5_UPTODATE, &dev->flags) &&
test_bit(R5_Expanded, &dev->flags))
/* If we've reshaped into here, we assume it is Insync.
* We will shortly update recovery_offset to make
* it official.
*/
set_bit(R5_Insync, &dev->flags);
if (rdev && test_bit(R5_WriteError, &dev->flags)) {
clear_bit(R5_Insync, &dev->flags);
if (!test_bit(Faulty, &rdev->flags)) {


@ -488,9 +488,10 @@ static int mxl5007t_write_regs(struct mxl5007t_state *state,
static int mxl5007t_read_reg(struct mxl5007t_state *state, u8 reg, u8 *val)
{
u8 buf[2] = { 0xfb, reg };
struct i2c_msg msg[] = {
{ .addr = state->i2c_props.addr, .flags = 0,
.buf = &reg, .len = 1 },
.buf = buf, .len = 2 },
{ .addr = state->i2c_props.addr, .flags = I2C_M_RD,
.buf = val, .len = 1 },
};


@ -141,7 +141,7 @@ static int tda18218_set_params(struct dvb_frontend *fe,
switch (params->u.ofdm.bandwidth) {
case BANDWIDTH_6_MHZ:
LP_Fc = 0;
LO_Frac = params->frequency + 4000000;
LO_Frac = params->frequency + 3000000;
break;
case BANDWIDTH_7_MHZ:
LP_Fc = 1;


@ -189,7 +189,7 @@ struct ati_remote {
dma_addr_t inbuf_dma;
dma_addr_t outbuf_dma;
unsigned char old_data[2]; /* Detect duplicate events */
unsigned char old_data; /* Detect duplicate events */
unsigned long old_jiffies;
unsigned long acc_jiffies; /* handle acceleration */
unsigned long first_jiffies;
@ -221,35 +221,35 @@ struct ati_remote {
/* Translation table from hardware messages to input events. */
static const struct {
short kind;
unsigned char data1, data2;
unsigned char data;
int type;
unsigned int code;
int value;
} ati_remote_tbl[] = {
/* Directional control pad axes */
{KIND_ACCEL, 0x35, 0x70, EV_REL, REL_X, -1}, /* left */
{KIND_ACCEL, 0x36, 0x71, EV_REL, REL_X, 1}, /* right */
{KIND_ACCEL, 0x37, 0x72, EV_REL, REL_Y, -1}, /* up */
{KIND_ACCEL, 0x38, 0x73, EV_REL, REL_Y, 1}, /* down */
{KIND_ACCEL, 0x70, EV_REL, REL_X, -1}, /* left */
{KIND_ACCEL, 0x71, EV_REL, REL_X, 1}, /* right */
{KIND_ACCEL, 0x72, EV_REL, REL_Y, -1}, /* up */
{KIND_ACCEL, 0x73, EV_REL, REL_Y, 1}, /* down */
/* Directional control pad diagonals */
{KIND_LU, 0x39, 0x74, EV_REL, 0, 0}, /* left up */
{KIND_RU, 0x3a, 0x75, EV_REL, 0, 0}, /* right up */
{KIND_LD, 0x3c, 0x77, EV_REL, 0, 0}, /* left down */
{KIND_RD, 0x3b, 0x76, EV_REL, 0, 0}, /* right down */
{KIND_LU, 0x74, EV_REL, 0, 0}, /* left up */
{KIND_RU, 0x75, EV_REL, 0, 0}, /* right up */
{KIND_LD, 0x77, EV_REL, 0, 0}, /* left down */
{KIND_RD, 0x76, EV_REL, 0, 0}, /* right down */
/* "Mouse button" buttons */
{KIND_LITERAL, 0x3d, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
{KIND_LITERAL, 0x3e, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
{KIND_LITERAL, 0x41, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
{KIND_LITERAL, 0x42, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
{KIND_LITERAL, 0x78, EV_KEY, BTN_LEFT, 1}, /* left btn down */
{KIND_LITERAL, 0x79, EV_KEY, BTN_LEFT, 0}, /* left btn up */
{KIND_LITERAL, 0x7c, EV_KEY, BTN_RIGHT, 1},/* right btn down */
{KIND_LITERAL, 0x7d, EV_KEY, BTN_RIGHT, 0},/* right btn up */
/* Artificial "doubleclick" events are generated by the hardware.
* They are mapped to the "side" and "extra" mouse buttons here. */
{KIND_FILTERED, 0x3f, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
{KIND_FILTERED, 0x43, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
{KIND_FILTERED, 0x7a, EV_KEY, BTN_SIDE, 1}, /* left dblclick */
{KIND_FILTERED, 0x7e, EV_KEY, BTN_EXTRA, 1},/* right dblclick */
/* Non-mouse events are handled by rc-core */
{KIND_END, 0x00, 0x00, EV_MAX + 1, 0, 0}
{KIND_END, 0x00, EV_MAX + 1, 0, 0}
};
/* Local function prototypes */
@ -396,25 +396,6 @@ static int ati_remote_sendpacket(struct ati_remote *ati_remote, u16 cmd, unsigne
return retval;
}
/*
* ati_remote_event_lookup
*/
static int ati_remote_event_lookup(int rem, unsigned char d1, unsigned char d2)
{
int i;
for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
/*
* Decide if the table entry matches the remote input.
*/
if (ati_remote_tbl[i].data1 == d1 &&
ati_remote_tbl[i].data2 == d2)
return i;
}
return -1;
}
/*
* ati_remote_compute_accel
*
@ -463,7 +444,15 @@ static void ati_remote_input_report(struct urb *urb)
int index = -1;
int acc;
int remote_num;
unsigned char scancode[2];
unsigned char scancode;
int i;
/*
* data[0] = 0x14
* data[1] = data[2] + data[3] + 0xd5 (a checksum byte)
* data[2] = the key code (with toggle bit in MSB with some models)
* data[3] = channel << 4 (the low 4 bits must be zero)
*/
/* Deal with strange looking inputs */
if ( (urb->actual_length != 4) || (data[0] != 0x14) ||
@ -472,6 +461,13 @@ static void ati_remote_input_report(struct urb *urb)
return;
}
if (data[1] != ((data[2] + data[3] + 0xd5) & 0xff)) {
dbginfo(&ati_remote->interface->dev,
"wrong checksum in input: %02x %02x %02x %02x\n",
data[0], data[1], data[2], data[3]);
return;
}
/* Mask unwanted remote channels. */
/* note: remote_num is 0-based, channel 1 on remote == 0 here */
remote_num = (data[3] >> 4) & 0x0f;
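To make that layout concrete, here is a tiny stand-alone decoder for one such report; the sample bytes are invented, with 0x78 matching the "left btn down" entry in the table above.

#include <stdio.h>

/* decode one 4-byte report using the layout described above */
int main(void)
{
	unsigned char data[4] = { 0x14, 0x4d, 0x78, 0x00 };

	if (data[0] != 0x14 ||
	    data[1] != ((data[2] + data[3] + 0xd5) & 0xff)) {
		printf("bad packet\n");
		return 1;
	}

	printf("channel %d, scancode 0x%02x, toggle %d\n",
	       (data[3] >> 4) & 0x0f,	/* channel is the high nibble */
	       data[2] & 0x7f,		/* key code without the toggle bit */
	       (data[2] >> 7) & 1);	/* MSB toggles on some remotes */
	return 0;
}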
@ -482,31 +478,30 @@ static void ati_remote_input_report(struct urb *urb)
return;
}
scancode[0] = (((data[1] - ((remote_num + 1) << 4)) & 0xf0) | (data[1] & 0x0f));
/*
* Some devices (e.g. SnapStream Firefly) use 8080 as toggle code,
* so we have to clear them. The first bit is a bit tricky as the
* "non-toggled" state depends on remote_num, so we xor it with the
* second bit which is only used for toggle.
* MSB is a toggle code, though only used by some devices
* (e.g. SnapStream Firefly)
*/
scancode[0] ^= (data[2] & 0x80);
scancode = data[2] & 0x7f;
scancode[1] = data[2] & ~0x80;
/* Look up event code index in mouse translation table. */
index = ati_remote_event_lookup(remote_num, scancode[0], scancode[1]);
/* Look up event code index in the mouse translation table. */
for (i = 0; ati_remote_tbl[i].kind != KIND_END; i++) {
if (scancode == ati_remote_tbl[i].data) {
index = i;
break;
}
}
if (index >= 0) {
dbginfo(&ati_remote->interface->dev,
"channel 0x%02x; mouse data %02x,%02x; index %d; keycode %d\n",
remote_num, data[1], data[2], index, ati_remote_tbl[index].code);
"channel 0x%02x; mouse data %02x; index %d; keycode %d\n",
remote_num, data[2], index, ati_remote_tbl[index].code);
if (!dev)
return; /* no mouse device */
} else
dbginfo(&ati_remote->interface->dev,
"channel 0x%02x; key data %02x,%02x, scancode %02x,%02x\n",
remote_num, data[1], data[2], scancode[0], scancode[1]);
"channel 0x%02x; key data %02x, scancode %02x\n",
remote_num, data[2], scancode);
if (index >= 0 && ati_remote_tbl[index].kind == KIND_LITERAL) {
@ -523,8 +518,7 @@ static void ati_remote_input_report(struct urb *urb)
unsigned long now = jiffies;
/* Filter duplicate events which happen "too close" together. */
if (ati_remote->old_data[0] == data[1] &&
ati_remote->old_data[1] == data[2] &&
if (ati_remote->old_data == data[2] &&
time_before(now, ati_remote->old_jiffies +
msecs_to_jiffies(repeat_filter))) {
ati_remote->repeat_count++;
@ -533,8 +527,7 @@ static void ati_remote_input_report(struct urb *urb)
ati_remote->first_jiffies = now;
}
ati_remote->old_data[0] = data[1];
ati_remote->old_data[1] = data[2];
ati_remote->old_data = data[2];
ati_remote->old_jiffies = now;
/* Ensure we skip at least the 4 first duplicate events (generated
@ -549,14 +542,13 @@ static void ati_remote_input_report(struct urb *urb)
if (index < 0) {
/* Not a mouse event, hand it to rc-core. */
u32 rc_code = (scancode[0] << 8) | scancode[1];
/*
* We don't use the rc-core repeat handling yet as
* it would cause ghost repeats which would be a
* regression for this driver.
*/
rc_keydown_notimeout(ati_remote->rdev, rc_code,
rc_keydown_notimeout(ati_remote->rdev, scancode,
data[2]);
rc_keyup(ati_remote->rdev);
return;
@ -607,8 +599,7 @@ static void ati_remote_input_report(struct urb *urb)
input_sync(dev);
ati_remote->old_jiffies = jiffies;
ati_remote->old_data[0] = data[1];
ati_remote->old_data[1] = data[2];
ati_remote->old_data = data[2];
}
}

Some files were not shown because too many files changed in this diff.