Merge branches 'sony-laptop', 'bugzilla-14247' and 'bugzilla-14271' into release

Len Brown 2009-10-02 11:27:57 -04:00
Parents 5e6f9725ac e12ac3d018 a83893ae90
Commit 6effe5f577
348 changed files with 14123 additions and 13404 deletions

View file

@ -232,7 +232,7 @@ your e-mail client so that it sends your patches untouched.
When sending patches to Linus, always follow step #7.
Large changes are not appropriate for mailing lists, and some
maintainers. If your patch, uncompressed, exceeds 40 kB in size,
maintainers. If your patch, uncompressed, exceeds 300 kB in size,
it is preferred that you store your patch on an Internet-accessible
server, and provide instead a URL (link) pointing to your patch.

View file

@ -282,9 +282,16 @@ stripe=n Number of filesystem blocks that mballoc will try
to use for allocation size and alignment. For RAID5/6
systems this should be the number of data
disks * RAID chunk size in file system blocks.
delalloc (*) Deferring block allocation until write-out time.
nodelalloc Disable delayed allocation. Blocks are allocation
when data is copied from user to page cache.
delalloc (*) Defer block allocation until just before ext4
writes out the block(s) in question. This
allows ext4 to make better allocation decisions
more efficiently.
nodelalloc Disable delayed allocation. Blocks are allocated
when the data is copied from userspace to the
page cache, either via the write(2) system call
or when an mmap'ed page which was previously
unallocated is written for the first time.
max_batch_time=usec Maximum amount of time ext4 should wait for
additional filesystem operations to be batch
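As a rough illustration of how the options above are consumed (not part of this commit), here is a minimal C sketch that passes them through mount(2); the device /dev/sdb1 and mount point /mnt/data are hypothetical:

	#include <stdio.h>
	#include <sys/mount.h>

	int main(void)
	{
		/* Illustrative only: mount an ext4 volume with delayed
		 * allocation disabled and operation batching turned off,
		 * using the option syntax documented above. */
		if (mount("/dev/sdb1", "/mnt/data", "ext4", 0,
			  "nodelalloc,max_batch_time=0") != 0)
			perror("mount");
		return 0;
	}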

View file

@ -1113,7 +1113,6 @@ Table 1-12: Files in /proc/fs/ext4/<devname>
..............................................................................
File Content
mb_groups details of multiblock allocator buddy cache of free blocks
mb_history multiblock allocation history
..............................................................................

View file

@ -102,7 +102,7 @@ shortname=lower|win95|winnt|mixed
winnt: emulate the Windows NT rule for display/create.
mixed: emulate the Windows NT rule for display,
emulate the Windows 95 rule for create.
Default setting is `lower'.
Default setting is `mixed'.
tz=UTC -- Interpret timestamps as UTC rather than local time.
This option disables the conversion of timestamps

View file

@ -741,23 +741,36 @@ M: Dirk Opfer <dirk@opfer-online.de>
S: Maintained
ARM/PALMTX,PALMT5,PALMLD,PALMTE2,PALMTC SUPPORT
P: Marek Vasut
M: marek.vasut@gmail.com
M: Marek Vasut <marek.vasut@gmail.com>
L: linux-arm-kernel@lists.infradead.org
W: http://hackndev.com
S: Maintained
F: arch/arm/mach-pxa/include/mach/palmtx.h
F: arch/arm/mach-pxa/palmtx.c
F: arch/arm/mach-pxa/include/mach/palmt5.h
F: arch/arm/mach-pxa/palmt5.c
F: arch/arm/mach-pxa/include/mach/palmld.h
F: arch/arm/mach-pxa/palmld.c
F: arch/arm/mach-pxa/include/mach/palmte2.h
F: arch/arm/mach-pxa/palmte2.c
F: arch/arm/mach-pxa/include/mach/palmtc.h
F: arch/arm/mach-pxa/palmtc.c
ARM/PALM TREO 680 SUPPORT
M: Tomas Cech <sleep_walker@suse.cz>
L: linux-arm-kernel@lists.infradead.org
W: http://hackndev.com
S: Maintained
F: arch/arm/mach-pxa/include/mach/treo680.h
F: arch/arm/mach-pxa/treo680.c
ARM/PALMZ72 SUPPORT
M: Sergey Lapin <slapin@ossfans.org>
L: linux-arm-kernel@lists.infradead.org
W: http://hackndev.com
S: Maintained
F: arch/arm/mach-pxa/include/mach/palmz72.h
F: arch/arm/mach-pxa/palmz72.c
ARM/PLEB SUPPORT
M: Peter Chubb <pleb@gelato.unsw.edu.au>

View file

@ -195,7 +195,7 @@ static int clk_debugfs_open(struct inode *inode, struct file *file)
return single_open(file, clk_debugfs_show, NULL);
}
static struct file_operations clk_debugfs_operations = {
static const struct file_operations clk_debugfs_operations = {
.open = clk_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,

View file

@ -38,7 +38,7 @@ static struct omap_id omap_ids[] __initdata = {
{ .jtag_id = 0xb574, .die_rev = 0x2, .omap_id = 0x03310315, .type = 0x03100000},
{ .jtag_id = 0x355f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300100},
{ .jtag_id = 0xb55f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300300},
{ .jtag_id = 0xb55f, .die_rev = 0x0, .omap_id = 0x03320500, .type = 0x08500000},
{ .jtag_id = 0xb62c, .die_rev = 0x1, .omap_id = 0x03320500, .type = 0x08500000},
{ .jtag_id = 0xb470, .die_rev = 0x0, .omap_id = 0x03310100, .type = 0x15100000},
{ .jtag_id = 0xb576, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x16100000},
{ .jtag_id = 0xb576, .die_rev = 0x2, .omap_id = 0x03320100, .type = 0x16110000},

View file

@ -54,7 +54,7 @@
#define TWL4030_MSECURE_GPIO 22
static int sdp3430_keymap[] = {
static int board_keymap[] = {
KEY(0, 0, KEY_LEFT),
KEY(0, 1, KEY_RIGHT),
KEY(0, 2, KEY_A),
@ -88,11 +88,15 @@ static int sdp3430_keymap[] = {
0
};
static struct matrix_keymap_data board_map_data = {
.keymap = board_keymap,
.keymap_size = ARRAY_SIZE(board_keymap),
};
static struct twl4030_keypad_data sdp3430_kp_data = {
.keymap_data = &board_map_data,
.rows = 5,
.cols = 6,
.keymap = sdp3430_keymap,
.keymapsize = ARRAY_SIZE(sdp3430_keymap),
.rep = 1,
};

View file

@ -80,7 +80,7 @@ static struct platform_device ldp_smsc911x_device = {
},
};
static int ldp_twl4030_keymap[] = {
static int board_keymap[] = {
KEY(0, 0, KEY_1),
KEY(1, 0, KEY_2),
KEY(2, 0, KEY_3),
@ -101,11 +101,15 @@ static int ldp_twl4030_keymap[] = {
0
};
static struct matrix_keymap_data board_map_data = {
.keymap = board_keymap,
.keymap_size = ARRAY_SIZE(board_keymap),
};
static struct twl4030_keypad_data ldp_kp_twl4030_data = {
.keymap_data = &board_map_data,
.rows = 6,
.cols = 6,
.keymap = ldp_twl4030_keymap,
.keymapsize = ARRAY_SIZE(ldp_twl4030_keymap),
.rep = 1,
};

View file

@ -139,8 +139,13 @@ static struct gpio_led gpio_leds[];
static int beagle_twl_gpio_setup(struct device *dev,
unsigned gpio, unsigned ngpio)
{
if (system_rev >= 0x20 && system_rev <= 0x34301000) {
omap_cfg_reg(AG9_34XX_GPIO23);
mmc[0].gpio_wp = 23;
} else {
omap_cfg_reg(AH8_34XX_GPIO29);
}
/* gpio + 0 is "mmc0_cd" (input/IRQ) */
omap_cfg_reg(AH8_34XX_GPIO29);
mmc[0].gpio_cd = gpio + 0;
twl4030_mmc_init(mmc);

View file

@ -159,7 +159,7 @@ static struct twl4030_usb_data omap3evm_usb_data = {
.usb_mode = T2_USB_MODE_ULPI,
};
static int omap3evm_keymap[] = {
static int board_keymap[] = {
KEY(0, 0, KEY_LEFT),
KEY(0, 1, KEY_RIGHT),
KEY(0, 2, KEY_A),
@ -178,11 +178,15 @@ static int omap3evm_keymap[] = {
KEY(3, 3, KEY_P)
};
static struct matrix_keymap_data board_map_data = {
.keymap = board_keymap,
.keymap_size = ARRAY_SIZE(board_keymap),
};
static struct twl4030_keypad_data omap3evm_kp_data = {
.keymap_data = &board_map_data,
.rows = 4,
.cols = 4,
.keymap = omap3evm_keymap,
.keymapsize = ARRAY_SIZE(omap3evm_keymap),
.rep = 1,
};

View file

@ -133,7 +133,7 @@ static void __init pandora_keys_gpio_init(void)
omap_set_gpio_debounce_time(32 * 5, GPIO_DEBOUNCE_TIME);
}
static int pandora_keypad_map[] = {
static int board_keymap[] = {
/* col, row, code */
KEY(0, 0, KEY_9),
KEY(0, 1, KEY_0),
@ -180,11 +180,15 @@ static int pandora_keypad_map[] = {
KEY(5, 2, KEY_FN),
};
static struct matrix_keymap_data board_map_data = {
.keymap = board_keymap,
.keymap_size = ARRAY_SIZE(board_keymap),
};
static struct twl4030_keypad_data pandora_kp_data = {
.keymap_data = &board_map_data,
.rows = 8,
.cols = 6,
.keymap = pandora_keypad_map,
.keymapsize = ARRAY_SIZE(pandora_keypad_map),
.rep = 1,
};

View file

@ -36,7 +36,7 @@
#define SYSTEM_REV_B_USES_VAUX3 0x1699
#define SYSTEM_REV_S_USES_VAUX3 0x8
static int rx51_keymap[] = {
static int board_keymap[] = {
KEY(0, 0, KEY_Q),
KEY(0, 1, KEY_W),
KEY(0, 2, KEY_E),
@ -83,11 +83,15 @@ static int rx51_keymap[] = {
KEY(0xff, 5, KEY_F10),
};
static struct matrix_keymap_data board_map_data = {
.keymap = board_keymap,
.keymap_size = ARRAY_SIZE(board_keymap),
};
static struct twl4030_keypad_data rx51_kp_data = {
.keymap_data = &board_map_data,
.rows = 8,
.cols = 8,
.keymap = rx51_keymap,
.keymapsize = ARRAY_SIZE(rx51_keymap),
.rep = 1,
};

View file

@ -27,7 +27,7 @@
#include "mmc-twl4030.h"
/* Zoom2 has Qwerty keyboard*/
static int zoom2_twl4030_keymap[] = {
static int board_keymap[] = {
KEY(0, 0, KEY_E),
KEY(1, 0, KEY_R),
KEY(2, 0, KEY_T),
@ -82,11 +82,15 @@ static int zoom2_twl4030_keymap[] = {
0
};
static struct matrix_keymap_data board_map_data = {
.keymap = board_keymap,
.keymap_size = ARRAY_SIZE(board_keymap),
};
static struct twl4030_keypad_data zoom2_kp_twl4030_data = {
.keymap_data = &board_map_data,
.rows = 8,
.cols = 8,
.keymap = zoom2_twl4030_keymap,
.keymapsize = ARRAY_SIZE(zoom2_twl4030_keymap),
.rep = 1,
};

View file

@ -22,7 +22,6 @@
#include <asm/atomic.h>
#include "cm.h"
#include "cm-regbits-4xxx.h"
/* XXX move this to cm.h */
/* MAX_MODULE_READY_TIME: max milliseconds for module to leave idle */
@ -50,19 +49,7 @@
*/
int omap4_cm_wait_idlest_ready(u32 prcm_mod, u8 prcm_dev_offs)
{
int i = 0;
u8 cm_id;
u16 prcm_mod_offs;
u32 mask = OMAP4_PRCM_CM_CLKCTRL_IDLEST_MASK;
cm_id = prcm_mod >> OMAP4_PRCM_MOD_CM_ID_SHIFT;
prcm_mod_offs = prcm_mod & OMAP4_PRCM_MOD_OFFS_MASK;
while (((omap4_cm_read_mod_reg(cm_id, prcm_mod_offs, prcm_dev_offs,
OMAP4_CM_CLKCTRL_DREG) & mask) != 0) &&
(i++ < MAX_MODULE_READY_TIME))
udelay(1);
return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY;
/* FIXME: Add clock manager related code */
return 0;
}

View file

@ -355,29 +355,60 @@ static struct platform_device omap2_mcspi4 = {
};
#endif
static void omap_init_mcspi(void)
#ifdef CONFIG_ARCH_OMAP4
static inline void omap4_mcspi_fixup(void)
{
if (cpu_is_omap44xx()) {
omap2_mcspi1_resources[0].start = OMAP4_MCSPI1_BASE;
omap2_mcspi1_resources[0].end = OMAP4_MCSPI1_BASE + 0xff;
omap2_mcspi2_resources[0].start = OMAP4_MCSPI2_BASE;
omap2_mcspi2_resources[0].end = OMAP4_MCSPI2_BASE + 0xff;
omap2_mcspi3_resources[0].start = OMAP4_MCSPI3_BASE;
omap2_mcspi3_resources[0].end = OMAP4_MCSPI3_BASE + 0xff;
omap2_mcspi4_resources[0].start = OMAP4_MCSPI4_BASE;
omap2_mcspi4_resources[0].end = OMAP4_MCSPI4_BASE + 0xff;
}
platform_device_register(&omap2_mcspi1);
platform_device_register(&omap2_mcspi2);
omap2_mcspi1_resources[0].start = OMAP4_MCSPI1_BASE;
omap2_mcspi1_resources[0].end = OMAP4_MCSPI1_BASE + 0xff;
omap2_mcspi2_resources[0].start = OMAP4_MCSPI2_BASE;
omap2_mcspi2_resources[0].end = OMAP4_MCSPI2_BASE + 0xff;
omap2_mcspi3_resources[0].start = OMAP4_MCSPI3_BASE;
omap2_mcspi3_resources[0].end = OMAP4_MCSPI3_BASE + 0xff;
omap2_mcspi4_resources[0].start = OMAP4_MCSPI4_BASE;
omap2_mcspi4_resources[0].end = OMAP4_MCSPI4_BASE + 0xff;
}
#else
static inline void omap4_mcspi_fixup(void)
{
}
#endif
#if defined(CONFIG_ARCH_OMAP2430) || defined(CONFIG_ARCH_OMAP3) || \
defined(CONFIG_ARCH_OMAP4)
if (cpu_is_omap2430() || cpu_is_omap343x() || cpu_is_omap44xx())
platform_device_register(&omap2_mcspi3);
static inline void omap2_mcspi3_init(void)
{
platform_device_register(&omap2_mcspi3);
}
#else
static inline void omap2_mcspi3_init(void)
{
}
#endif
#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4)
if (cpu_is_omap343x() || cpu_is_omap44xx())
platform_device_register(&omap2_mcspi4);
static inline void omap2_mcspi4_init(void)
{
platform_device_register(&omap2_mcspi4);
}
#else
static inline void omap2_mcspi4_init(void)
{
}
#endif
static void omap_init_mcspi(void)
{
if (cpu_is_omap44xx())
omap4_mcspi_fixup();
platform_device_register(&omap2_mcspi1);
platform_device_register(&omap2_mcspi2);
if (cpu_is_omap2430() || cpu_is_omap343x() || cpu_is_omap44xx())
omap2_mcspi3_init();
if (cpu_is_omap343x() || cpu_is_omap44xx())
omap2_mcspi4_init();
}
#else

View file

@ -294,10 +294,10 @@ void __init omap2_init_common_hw(struct omap_sdrc_params *sdrc_cs0,
else if (cpu_is_omap34xx())
hwmods = omap34xx_hwmods;
omap_hwmod_init(hwmods);
omap2_mux_init();
#ifndef CONFIG_ARCH_OMAP4 /* FIXME: Remove this once the clkdev is ready */
/* The OPP tables have to be registered before a clk init */
omap_hwmod_init(hwmods);
omap2_mux_init();
omap_pm_if_early_init(mpu_opps, dsp_opps, l3_opps);
pwrdm_init(powerdomains_omap);
clkdm_init(clockdomains_omap, clkdm_pwrdm_autodeps);

View file

@ -79,7 +79,7 @@ static int omap2_iommu_enable(struct iommu *obj)
l = iommu_read_reg(obj, MMU_SYSSTATUS);
if (l & MMU_SYS_RESETDONE)
break;
} while (time_after(jiffies, timeout));
} while (!time_after(jiffies, timeout));
if (!(l & MMU_SYS_RESETDONE)) {
dev_err(obj->dev, "can't take mmu out of reset\n");

View file

@ -30,6 +30,14 @@
#define MAILBOX_IRQ_NEWMSG(u) (1 << (2 * (u)))
#define MAILBOX_IRQ_NOTFULL(u) (1 << (2 * (u) + 1))
/* SYSCONFIG: register bit definition */
#define AUTOIDLE (1 << 0)
#define SOFTRESET (1 << 1)
#define SMARTIDLE (2 << 3)
/* SYSSTATUS: register bit definition */
#define RESETDONE (1 << 0)
#define MBOX_REG_SIZE 0x120
#define MBOX_NR_REGS (MBOX_REG_SIZE / sizeof(u32))
@ -69,21 +77,33 @@ static inline void mbox_write_reg(u32 val, size_t ofs)
/* Mailbox H/W preparations */
static int omap2_mbox_startup(struct omap_mbox *mbox)
{
unsigned int l;
u32 l;
unsigned long timeout;
mbox_ick_handle = clk_get(NULL, "mailboxes_ick");
if (IS_ERR(mbox_ick_handle)) {
printk("Could not get mailboxes_ick\n");
pr_err("Can't get mailboxes_ick\n");
return -ENODEV;
}
clk_enable(mbox_ick_handle);
mbox_write_reg(SOFTRESET, MAILBOX_SYSCONFIG);
timeout = jiffies + msecs_to_jiffies(20);
do {
l = mbox_read_reg(MAILBOX_SYSSTATUS);
if (l & RESETDONE)
break;
} while (!time_after(jiffies, timeout));
if (!(l & RESETDONE)) {
pr_err("Can't take mmu out of reset\n");
return -ENODEV;
}
l = mbox_read_reg(MAILBOX_REVISION);
pr_info("omap mailbox rev %d.%d\n", (l & 0xf0) >> 4, (l & 0x0f));
/* set smart-idle & autoidle */
l = mbox_read_reg(MAILBOX_SYSCONFIG);
l |= 0x00000011;
l = SMARTIDLE | AUTOIDLE;
mbox_write_reg(l, MAILBOX_SYSCONFIG);
omap2_mbox_enable_irq(mbox, IRQ_RX);
@ -156,6 +176,9 @@ static void omap2_mbox_ack_irq(struct omap_mbox *mbox,
u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit;
mbox_write_reg(bit, p->irqstatus);
/* Flush posted write for irq status to avoid spurious interrupts */
mbox_read_reg(p->irqstatus);
}
static int omap2_mbox_is_irq(struct omap_mbox *mbox,

View file

@ -460,6 +460,8 @@ MUX_CFG_34XX("AF26_34XX_GPIO0", 0x1e0,
OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
MUX_CFG_34XX("AF22_34XX_GPIO9", 0xa18,
OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
MUX_CFG_34XX("AG9_34XX_GPIO23", 0x5ee,
OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
MUX_CFG_34XX("AH8_34XX_GPIO29", 0x5fa,
OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_INPUT)
MUX_CFG_34XX("U8_34XX_GPIO54_OUT", 0x0b4,
@ -472,6 +474,8 @@ MUX_CFG_34XX("G25_34XX_GPIO86_OUT", 0x0fc,
OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
MUX_CFG_34XX("AG4_34XX_GPIO134_OUT", 0x160,
OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
MUX_CFG_34XX("AF4_34XX_GPIO135_OUT", 0x162,
OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
MUX_CFG_34XX("AE4_34XX_GPIO136_OUT", 0x164,
OMAP34XX_MUX_MODE4 | OMAP34XX_PIN_OUTPUT)
MUX_CFG_34XX("AF6_34XX_GPIO140_UP", 0x16c,

View file

@ -110,7 +110,7 @@ static struct plat_serial8250_port serial_platform_data2[] = {
.uartclk = OMAP24XX_BASE_BAUD * 16,
}, {
#ifdef CONFIG_ARCH_OMAP4
.membase = IO_ADDRESS(OMAP_UART4_BASE),
.membase = OMAP2_IO_ADDRESS(OMAP_UART4_BASE),
.mapbase = OMAP_UART4_BASE,
.irq = 70,
.flags = UPF_BOOT_AUTOCONF,
@ -126,7 +126,7 @@ static struct plat_serial8250_port serial_platform_data2[] = {
#ifdef CONFIG_ARCH_OMAP4
static struct plat_serial8250_port serial_platform_data3[] = {
{
.membase = IO_ADDRESS(OMAP_UART4_BASE),
.membase = OMAP2_IO_ADDRESS(OMAP_UART4_BASE),
.mapbase = OMAP_UART4_BASE,
.irq = 70,
.flags = UPF_BOOT_AUTOCONF,
@ -579,7 +579,7 @@ static struct omap_uart_state omap_uart[OMAP_MAX_NR_PORTS] = {
{
.pdev = {
.name = "serial8250",
.id = 3
.id = 3,
.dev = {
.platform_data = serial_platform_data3,
},

View file

@ -250,7 +250,7 @@ static struct gpio_bank gpio_bank_730[7] = {
#ifdef CONFIG_ARCH_OMAP850
static struct gpio_bank gpio_bank_850[7] = {
{ OMAP1_MPUIO_BASE, INT_850_MPUIO, IH_MPUIO_BASE, METHOD_MPUIO },
{ OMAP1_MPUIO_VBASE, INT_850_MPUIO, IH_MPUIO_BASE, METHOD_MPUIO },
{ OMAP850_GPIO1_BASE, INT_850_GPIO_BANK1, IH_GPIO_BASE, METHOD_GPIO_850 },
{ OMAP850_GPIO2_BASE, INT_850_GPIO_BANK2, IH_GPIO_BASE + 32, METHOD_GPIO_850 },
{ OMAP850_GPIO3_BASE, INT_850_GPIO_BANK3, IH_GPIO_BASE + 64, METHOD_GPIO_850 },

View file

@ -10,6 +10,8 @@
#ifndef ASMARM_ARCH_KEYPAD_H
#define ASMARM_ARCH_KEYPAD_H
#include <linux/input/matrix_keypad.h>
struct omap_kp_platform_data {
int rows;
int cols;
@ -35,9 +37,6 @@ struct omap_kp_platform_data {
#define KEY_PERSISTENT 0x00800000
#define KEYNUM_MASK 0x00EFFFFF
#define KEY(col, row, val) (((col) << 28) | ((row) << 24) | (val))
#define PERSISTENT_KEY(col, row) (((col) << 28) | ((row) << 24) | \
KEY_PERSISTENT)
#endif

View file

@ -840,12 +840,14 @@ enum omap34xx_index {
*/
AF26_34XX_GPIO0,
AF22_34XX_GPIO9,
AG9_34XX_GPIO23,
AH8_34XX_GPIO29,
U8_34XX_GPIO54_OUT,
U8_34XX_GPIO54_DOWN,
L8_34XX_GPIO63,
G25_34XX_GPIO86_OUT,
AG4_34XX_GPIO134_OUT,
AF4_34XX_GPIO135_OUT,
AE4_34XX_GPIO136_OUT,
AF6_34XX_GPIO140_UP,
AE6_34XX_GPIO141,

View file

@ -199,7 +199,8 @@ static void *vmap_sg(const struct sg_table *sgt)
va += bytes;
}
flush_cache_vmap(new->addr, new->addr + total);
flush_cache_vmap((unsigned long)new->addr,
(unsigned long)(new->addr + total));
return new->addr;
err_out:
@ -390,7 +391,7 @@ static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
}
va_end = _va + PAGE_SIZE * i;
flush_cache_vmap(_va, va_end);
flush_cache_vmap((unsigned long)_va, (unsigned long)va_end);
}
static inline void sgtable_drain_vmalloc(struct sg_table *sgt)

View file

@ -2,8 +2,11 @@
#define _ARCH_MCI_H
struct s3c24xx_mci_pdata {
unsigned int no_wprotect : 1;
unsigned int no_detect : 1;
unsigned int wprotect_invert : 1;
unsigned int detect_invert : 1; /* set => detect active high. */
unsigned int use_dma : 1;
unsigned int gpio_detect;
unsigned int gpio_wprotect;

View file

@ -48,7 +48,7 @@ coreb_ioctl(struct inode *inode, struct file *file, unsigned int cmd, unsigned l
return ret;
}
static struct file_operations coreb_fops = {
static const struct file_operations coreb_fops = {
.owner = THIS_MODULE,
.ioctl = coreb_ioctl,
};

View file

@ -244,7 +244,7 @@ static unsigned sync_serial_prescale_shadow;
#define NUMBER_OF_PORTS 2
static struct file_operations sync_serial_fops = {
static const struct file_operations sync_serial_fops = {
.owner = THIS_MODULE,
.write = sync_serial_write,
.read = sync_serial_read,

View file

@ -855,7 +855,7 @@ gpio_leds_ioctl(unsigned int cmd, unsigned long arg)
return 0;
}
struct file_operations gpio_fops = {
static const struct file_operations gpio_fops = {
.owner = THIS_MODULE,
.poll = gpio_poll,
.ioctl = gpio_ioctl,

View file

@ -1,8 +1,16 @@
#ifndef __M68K_HARDIRQ_H
#define __M68K_HARDIRQ_H
#include <linux/threads.h>
#include <linux/cache.h>
/* entry.S is sensitive to the offsets of these fields */
typedef struct {
unsigned int __softirq_pending;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#define HARDIRQ_BITS 8
#include <asm-generic/hardirq.h>
#endif

View file

@ -175,7 +175,7 @@ static dbdev_tab_t dbdev_tab[] = {
#define DBDEV_TAB_SIZE ARRAY_SIZE(dbdev_tab)
#ifdef CONFIG_PM
static u32 au1xxx_dbdma_pm_regs[NUM_DBDMA_CHANS + 1][8];
static u32 au1xxx_dbdma_pm_regs[NUM_DBDMA_CHANS + 1][6];
#endif
@ -993,14 +993,13 @@ void au1xxx_dbdma_suspend(void)
au1xxx_dbdma_pm_regs[0][3] = au_readl(addr + 0x0c);
/* save channel configurations */
for (i = 1, addr = DDMA_CHANNEL_BASE; i < NUM_DBDMA_CHANS; i++) {
for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) {
au1xxx_dbdma_pm_regs[i][0] = au_readl(addr + 0x00);
au1xxx_dbdma_pm_regs[i][1] = au_readl(addr + 0x04);
au1xxx_dbdma_pm_regs[i][2] = au_readl(addr + 0x08);
au1xxx_dbdma_pm_regs[i][3] = au_readl(addr + 0x0c);
au1xxx_dbdma_pm_regs[i][4] = au_readl(addr + 0x10);
au1xxx_dbdma_pm_regs[i][5] = au_readl(addr + 0x14);
au1xxx_dbdma_pm_regs[i][6] = au_readl(addr + 0x18);
/* halt channel */
au_writel(au1xxx_dbdma_pm_regs[i][0] & ~1, addr + 0x00);
@ -1027,14 +1026,13 @@ void au1xxx_dbdma_resume(void)
au_writel(au1xxx_dbdma_pm_regs[0][3], addr + 0x0c);
/* restore channel configurations */
for (i = 1, addr = DDMA_CHANNEL_BASE; i < NUM_DBDMA_CHANS; i++) {
for (i = 1, addr = DDMA_CHANNEL_BASE; i <= NUM_DBDMA_CHANS; i++) {
au_writel(au1xxx_dbdma_pm_regs[i][0], addr + 0x00);
au_writel(au1xxx_dbdma_pm_regs[i][1], addr + 0x04);
au_writel(au1xxx_dbdma_pm_regs[i][2], addr + 0x08);
au_writel(au1xxx_dbdma_pm_regs[i][3], addr + 0x0c);
au_writel(au1xxx_dbdma_pm_regs[i][4], addr + 0x10);
au_writel(au1xxx_dbdma_pm_regs[i][5], addr + 0x14);
au_writel(au1xxx_dbdma_pm_regs[i][6], addr + 0x18);
au_sync();
addr += 0x100; /* next channel base */
}

View file

@ -112,10 +112,8 @@ static int iodev_open(struct inode *i, struct file *f)
{
int ret;
lock_kernel();
ret = request_irq(iodev_irq, iodev_irqhdl, IRQF_DISABLED,
iodev_name, &miscdev);
unlock_kernel();
return ret;
}

View file

@ -1,5 +1,5 @@
obj-y += clk.o cpu.o cs.o gpio.o irq.o prom.o setup.o timer.o \
dev-dsp.o dev-enet.o
dev-dsp.o dev-enet.o dev-pcmcia.o dev-uart.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-y += boards/

View file

@ -20,10 +20,11 @@
#include <bcm63xx_cpu.h>
#include <bcm63xx_regs.h>
#include <bcm63xx_io.h>
#include <bcm63xx_board.h>
#include <bcm63xx_dev_pci.h>
#include <bcm63xx_dev_enet.h>
#include <bcm63xx_dev_dsp.h>
#include <bcm63xx_dev_pcmcia.h>
#include <bcm63xx_dev_uart.h>
#include <board_bcm963xx.h>
#define PFX "board_bcm963xx: "
@ -793,6 +794,11 @@ int __init board_register_devices(void)
{
u32 val;
bcm63xx_uart_register();
if (board.has_pccard)
bcm63xx_pcmcia_register();
if (board.has_enet0 &&
!board_get_mac_address(board.enet0.mac_addr))
bcm63xx_enet_register(0, &board.enet0);

View file

@ -0,0 +1,144 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <asm/bootinfo.h>
#include <linux/platform_device.h>
#include <bcm63xx_cs.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_dev_pcmcia.h>
#include <bcm63xx_io.h>
#include <bcm63xx_regs.h>
static struct resource pcmcia_resources[] = {
/* pcmcia registers */
{
/* start & end filled at runtime */
.flags = IORESOURCE_MEM,
},
/* pcmcia memory zone resources */
{
.start = BCM_PCMCIA_COMMON_BASE_PA,
.end = BCM_PCMCIA_COMMON_END_PA,
.flags = IORESOURCE_MEM,
},
{
.start = BCM_PCMCIA_ATTR_BASE_PA,
.end = BCM_PCMCIA_ATTR_END_PA,
.flags = IORESOURCE_MEM,
},
{
.start = BCM_PCMCIA_IO_BASE_PA,
.end = BCM_PCMCIA_IO_END_PA,
.flags = IORESOURCE_MEM,
},
/* PCMCIA irq */
{
/* start filled at runtime */
.flags = IORESOURCE_IRQ,
},
/* declare PCMCIA IO resource also */
{
.start = BCM_PCMCIA_IO_BASE_PA,
.end = BCM_PCMCIA_IO_END_PA,
.flags = IORESOURCE_IO,
},
};
static struct bcm63xx_pcmcia_platform_data pd;
static struct platform_device bcm63xx_pcmcia_device = {
.name = "bcm63xx_pcmcia",
.id = 0,
.num_resources = ARRAY_SIZE(pcmcia_resources),
.resource = pcmcia_resources,
.dev = {
.platform_data = &pd,
},
};
static int __init config_pcmcia_cs(unsigned int cs,
u32 base, unsigned int size)
{
int ret;
ret = bcm63xx_set_cs_status(cs, 0);
if (!ret)
ret = bcm63xx_set_cs_base(cs, base, size);
if (!ret)
ret = bcm63xx_set_cs_status(cs, 1);
return ret;
}
static const __initdata struct {
unsigned int cs;
unsigned int base;
unsigned int size;
} pcmcia_cs[3] = {
{
.cs = MPI_CS_PCMCIA_COMMON,
.base = BCM_PCMCIA_COMMON_BASE_PA,
.size = BCM_PCMCIA_COMMON_SIZE
},
{
.cs = MPI_CS_PCMCIA_ATTR,
.base = BCM_PCMCIA_ATTR_BASE_PA,
.size = BCM_PCMCIA_ATTR_SIZE
},
{
.cs = MPI_CS_PCMCIA_IO,
.base = BCM_PCMCIA_IO_BASE_PA,
.size = BCM_PCMCIA_IO_SIZE
},
};
int __init bcm63xx_pcmcia_register(void)
{
int ret, i;
if (!BCMCPU_IS_6348() && !BCMCPU_IS_6358())
return 0;
/* use correct pcmcia ready gpio depending on processor */
switch (bcm63xx_get_cpu_id()) {
case BCM6348_CPU_ID:
pd.ready_gpio = 22;
break;
case BCM6358_CPU_ID:
pd.ready_gpio = 18;
break;
default:
return -ENODEV;
}
pcmcia_resources[0].start = bcm63xx_regset_address(RSET_PCMCIA);
pcmcia_resources[0].end = pcmcia_resources[0].start +
RSET_PCMCIA_SIZE - 1;
pcmcia_resources[4].start = bcm63xx_get_irq_number(IRQ_PCMCIA);
/* configure pcmcia chip selects */
for (i = 0; i < 3; i++) {
ret = config_pcmcia_cs(pcmcia_cs[i].cs,
pcmcia_cs[i].base,
pcmcia_cs[i].size);
if (ret)
goto out_err;
}
return platform_device_register(&bcm63xx_pcmcia_device);
out_err:
printk(KERN_ERR "unable to set pcmcia chip select\n");
return ret;
}

View file

@ -0,0 +1,41 @@
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <bcm63xx_cpu.h>
#include <bcm63xx_dev_uart.h>
static struct resource uart_resources[] = {
{
.start = -1, /* filled at runtime */
.end = -1, /* filled at runtime */
.flags = IORESOURCE_MEM,
},
{
.start = -1, /* filled at runtime */
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bcm63xx_uart_device = {
.name = "bcm63xx_uart",
.id = 0,
.num_resources = ARRAY_SIZE(uart_resources),
.resource = uart_resources,
};
int __init bcm63xx_uart_register(void)
{
uart_resources[0].start = bcm63xx_regset_address(RSET_UART0);
uart_resources[0].end = uart_resources[0].start;
uart_resources[0].end += RSET_UART_SIZE - 1;
uart_resources[1].start = bcm63xx_get_irq_number(IRQ_UART0);
return platform_device_register(&bcm63xx_uart_device);
}

View file

@ -0,0 +1,13 @@
#ifndef BCM63XX_DEV_PCMCIA_H_
#define BCM63XX_DEV_PCMCIA_H_
/*
* PCMCIA driver platform data
*/
struct bcm63xx_pcmcia_platform_data {
unsigned int ready_gpio;
};
int bcm63xx_pcmcia_register(void);
#endif /* BCM63XX_DEV_PCMCIA_H_ */

View file

@ -0,0 +1,6 @@
#ifndef BCM63XX_DEV_UART_H_
#define BCM63XX_DEV_UART_H_
int bcm63xx_uart_register(void);
#endif /* BCM63XX_DEV_UART_H_ */

View file

@ -77,7 +77,18 @@ extern void play_dead(void);
extern asmlinkage void smp_call_function_interrupt(void);
extern void arch_send_call_function_single_ipi(int cpu);
extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
static inline void arch_send_call_function_single_ipi(int cpu)
{
extern struct plat_smp_ops *mp_ops; /* private */
mp_ops->send_ipi_mask(&cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
}
static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
extern struct plat_smp_ops *mp_ops; /* private */
mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}
#endif /* __ASM_SMP_H */

View file

@ -12,17 +12,17 @@
#if defined(__MIPSEB__)
# include <linux/unaligned/be_struct.h>
# include <linux/unaligned/le_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_be
# define put_unaligned __put_unaligned_be
#elif defined(__MIPSEL__)
# include <linux/unaligned/le_struct.h>
# include <linux/unaligned/be_byteshift.h>
# include <linux/unaligned/generic.h>
# define get_unaligned __get_unaligned_le
# define put_unaligned __put_unaligned_le
#else
# error "MIPS, but neither __MIPSEB__, nor __MIPSEL__???"
#endif
# include <linux/unaligned/generic.h>
#endif /* _ASM_MIPS_UNALIGNED_H */

View file

@ -172,13 +172,20 @@ static unsigned int translate_open_flags(int flags)
}
static void sp_setfsuidgid( uid_t uid, gid_t gid)
static int sp_setfsuidgid(uid_t uid, gid_t gid)
{
current->cred->fsuid = uid;
current->cred->fsgid = gid;
struct cred *new;
key_fsuid_changed(current);
key_fsgid_changed(current);
new = prepare_creds();
if (!new)
return -ENOMEM;
new->fsuid = uid;
new->fsgid = gid;
commit_creds(new);
return 0;
}
/*
@ -196,7 +203,7 @@ void sp_work_handle_request(void)
mm_segment_t old_fs;
struct timeval tv;
struct timezone tz;
int cmd;
int err, cmd;
char *vcwd;
int size;
@ -225,8 +232,11 @@ void sp_work_handle_request(void)
/* Run the syscall at the privilege of the user who loaded the
SP program */
if (vpe_getuid(tclimit))
sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit));
if (vpe_getuid(tclimit)) {
err = sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit));
if (!err)
pr_err("Change of creds failed\n");
}
switch (sc.cmd) {
/* needs the flags argument translating from SDE kit to
@ -283,8 +293,11 @@ void sp_work_handle_request(void)
break;
} /* switch */
if (vpe_getuid(tclimit))
sp_setfsuidgid( 0, 0);
if (vpe_getuid(tclimit)) {
err = sp_setfsuidgid(0, 0);
if (!err)
pr_err("restoring old creds failed\n");
}
old_fs = get_fs();
set_fs(KERNEL_DS);

View file

@ -72,8 +72,9 @@ static void rtlx_dispatch(void)
*/
static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
{
unsigned int vpeflags;
unsigned long flags;
int i;
unsigned int flags, vpeflags;
/* Ought not to be strictly necessary for SMTC builds */
local_irq_save(flags);
@ -392,20 +393,12 @@ out:
static int file_open(struct inode *inode, struct file *filp)
{
int minor = iminor(inode);
int err;
lock_kernel();
err = rtlx_open(minor, (filp->f_flags & O_NONBLOCK) ? 0 : 1);
unlock_kernel();
return err;
return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1);
}
static int file_release(struct inode *inode, struct file *filp)
{
int minor = iminor(inode);
return rtlx_release(minor);
return rtlx_release(iminor(inode));
}
static unsigned int file_poll(struct file *file, poll_table * wait)

View file

@ -32,7 +32,6 @@
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <asm/atomic.h>
#include <asm/cpu.h>
@ -128,19 +127,6 @@ asmlinkage __cpuinit void start_secondary(void)
cpu_idle();
}
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);
}
/*
* We reuse the same vector for the single IPI
*/
void arch_send_call_function_single_ipi(int cpu)
{
mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION);
}
/*
* Call into both interrupt handlers, as we share the IPI for them
*/

View file

@ -1098,9 +1098,8 @@ static void ipi_irq_dispatch(void)
static struct irqaction irq_ipi = {
.handler = ipi_interrupt,
.flags = IRQF_DISABLED,
.name = "SMTC_IPI",
.flags = IRQF_PERCPU
.flags = IRQF_DISABLED | IRQF_PERCPU,
.name = "SMTC_IPI"
};
static void setup_cross_vpe_interrupts(unsigned int nvpe)

View file

@ -144,14 +144,15 @@ struct tc {
};
struct {
/* Virtual processing elements */
struct list_head vpe_list;
/* Thread contexts */
struct list_head tc_list;
spinlock_t vpe_list_lock;
struct list_head vpe_list; /* Virtual processing elements */
spinlock_t tc_list_lock;
struct list_head tc_list; /* Thread contexts */
} vpecontrol = {
.vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
.tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
.vpe_list_lock = SPIN_LOCK_UNLOCKED,
.vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
.tc_list_lock = SPIN_LOCK_UNLOCKED,
.tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
};
static void release_progmem(void *ptr);
@ -159,28 +160,38 @@ static void release_progmem(void *ptr);
/* get the vpe associated with this minor */
static struct vpe *get_vpe(int minor)
{
struct vpe *v;
struct vpe *res, *v;
if (!cpu_has_mipsmt)
return NULL;
res = NULL;
spin_lock(&vpecontrol.vpe_list_lock);
list_for_each_entry(v, &vpecontrol.vpe_list, list) {
if (v->minor == minor)
return v;
if (v->minor == minor) {
res = v;
break;
}
}
spin_unlock(&vpecontrol.vpe_list_lock);
return NULL;
return res;
}
/* get the vpe associated with this minor */
static struct tc *get_tc(int index)
{
struct tc *t;
struct tc *res, *t;
res = NULL;
spin_lock(&vpecontrol.tc_list_lock);
list_for_each_entry(t, &vpecontrol.tc_list, list) {
if (t->index == index)
return t;
if (t->index == index) {
res = t;
break;
}
}
spin_unlock(&vpecontrol.tc_list_lock);
return NULL;
}
@ -190,15 +201,17 @@ static struct vpe *alloc_vpe(int minor)
{
struct vpe *v;
if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) {
if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL)
return NULL;
}
INIT_LIST_HEAD(&v->tc);
spin_lock(&vpecontrol.vpe_list_lock);
list_add_tail(&v->list, &vpecontrol.vpe_list);
spin_unlock(&vpecontrol.vpe_list_lock);
INIT_LIST_HEAD(&v->notify);
v->minor = minor;
return v;
}
@ -212,7 +225,10 @@ static struct tc *alloc_tc(int index)
INIT_LIST_HEAD(&tc->tc);
tc->index = index;
spin_lock(&vpecontrol.tc_list_lock);
list_add_tail(&tc->list, &vpecontrol.tc_list);
spin_unlock(&vpecontrol.tc_list_lock);
out:
return tc;
@ -227,7 +243,7 @@ static void release_vpe(struct vpe *v)
kfree(v);
}
static void dump_mtregs(void)
static void __maybe_unused dump_mtregs(void)
{
unsigned long val;
@ -1048,20 +1064,19 @@ static int vpe_open(struct inode *inode, struct file *filp)
enum vpe_state state;
struct vpe_notifications *not;
struct vpe *v;
int ret, err = 0;
int ret;
lock_kernel();
if (minor != iminor(inode)) {
/* assume only 1 device at the moment. */
printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
err = -ENODEV;
goto out;
pr_warning("VPE loader: only vpe1 is supported\n");
return -ENODEV;
}
if ((v = get_vpe(tclimit)) == NULL) {
printk(KERN_WARNING "VPE loader: unable to get vpe\n");
err = -ENODEV;
goto out;
pr_warning("VPE loader: unable to get vpe\n");
return -ENODEV;
}
state = xchg(&v->state, VPE_STATE_INUSE);
@ -1101,8 +1116,8 @@ static int vpe_open(struct inode *inode, struct file *filp)
v->shared_ptr = NULL;
v->__start = 0;
out:
unlock_kernel();
return 0;
}
@ -1594,14 +1609,14 @@ static void __exit vpe_module_exit(void)
{
struct vpe *v, *n;
list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
if (v->state != VPE_STATE_UNUSED) {
release_vpe(v);
}
}
device_del(&vpe_device);
unregister_chrdev(major, module_name);
/* No locking needed here */
list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
if (v->state != VPE_STATE_UNUSED)
release_vpe(v);
}
}
module_init(vpe_module_init);

View file

@ -32,6 +32,11 @@ static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
*/
static void mips_sc_inv(unsigned long addr, unsigned long size)
{
unsigned long lsize = cpu_scache_line_size();
unsigned long almask = ~(lsize - 1);
cache_op(Hit_Writeback_Inv_SD, addr & almask);
cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
blast_inv_scache_range(addr, addr + size);
}

View file

@ -44,7 +44,7 @@ static struct loongson2_register_config {
unsigned int ctrl;
unsigned long long reset_counter1;
unsigned long long reset_counter2;
int cnt1_enalbed, cnt2_enalbed;
int cnt1_enabled, cnt2_enabled;
} reg;
DEFINE_SPINLOCK(sample_lock);
@ -81,8 +81,8 @@ static void loongson2_reg_setup(struct op_counter_config *cfg)
reg.ctrl = ctrl;
reg.cnt1_enalbed = cfg[0].enabled;
reg.cnt2_enalbed = cfg[1].enabled;
reg.cnt1_enabled = cfg[0].enabled;
reg.cnt2_enabled = cfg[1].enabled;
}
@ -99,7 +99,7 @@ static void loongson2_cpu_setup(void *args)
static void loongson2_cpu_start(void *args)
{
/* Start all counters on current CPU */
if (reg.cnt1_enalbed || reg.cnt2_enalbed)
if (reg.cnt1_enabled || reg.cnt2_enabled)
write_c0_perfctrl(reg.ctrl);
}
@ -125,7 +125,7 @@ static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
*/
/* Check whether the irq belongs to me */
enabled = reg.cnt1_enalbed | reg.cnt2_enalbed;
enabled = reg.cnt1_enabled | reg.cnt2_enabled;
if (!enabled)
return IRQ_NONE;
@ -136,12 +136,12 @@ static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
spin_lock_irqsave(&sample_lock, flags);
if (counter1 & LOONGSON2_PERFCNT_OVERFLOW) {
if (reg.cnt1_enalbed)
if (reg.cnt1_enabled)
oprofile_add_sample(regs, 0);
counter1 = reg.reset_counter1;
}
if (counter2 & LOONGSON2_PERFCNT_OVERFLOW) {
if (reg.cnt2_enalbed)
if (reg.cnt2_enabled)
oprofile_add_sample(regs, 1);
counter2 = reg.reset_counter2;
}

View file

@ -385,6 +385,7 @@ int msp_pcibios_config_access(unsigned char access_type,
unsigned long intr;
unsigned long value;
static char pciirqflag;
int ret;
#if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL)
unsigned int vpe_status;
#endif
@ -402,11 +403,13 @@ int msp_pcibios_config_access(unsigned char access_type,
* allocation assigns an interrupt handler to the interrupt.
*/
if (pciirqflag == 0) {
request_irq(MSP_INT_PCI,/* Hardcoded internal MSP7120 wiring */
ret = request_irq(MSP_INT_PCI,/* Hardcoded internal MSP7120 wiring */
bpci_interrupt,
IRQF_SHARED | IRQF_DISABLED,
"PMC MSP PCI Host",
preg);
if (ret != 0)
return ret;
pciirqflag = ~0;
}

View file

@ -165,7 +165,7 @@ static void ip27_send_ipi_single(int destid, unsigned int action)
REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
}
static void ip27_send_ipi(const struct cpumask *mask, unsigned int action)
static void ip27_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;

View file

@ -117,10 +117,6 @@ static int bcm1480_set_affinity(unsigned int irq, const struct cpumask *mask)
unsigned long flags;
unsigned int irq_dirty;
if (cpumask_weight(mask) != 1) {
printk("attempted to set irq affinity for irq %d to multiple CPUs\n", irq);
return -1;
}
i = cpumask_first(mask);
/* Convert logical CPU to physical CPU */

View file

@ -403,36 +403,31 @@ static int sbprof_zbprof_stop(void)
static int sbprof_tb_open(struct inode *inode, struct file *filp)
{
int minor;
int err = 0;
lock_kernel();
minor = iminor(inode);
if (minor != 0) {
err = -ENODEV;
goto out;
}
if (minor != 0)
return -ENODEV;
if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED) {
err = -EBUSY;
goto out;
}
if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED)
return -EBUSY;
memset(&sbp, 0, sizeof(struct sbprof_tb));
sbp.sbprof_tbbuf = vmalloc(MAX_TBSAMPLE_BYTES);
if (!sbp.sbprof_tbbuf) {
err = -ENOMEM;
goto out;
sbp.open = SB_CLOSED;
wmb();
return -ENOMEM;
}
memset(sbp.sbprof_tbbuf, 0, MAX_TBSAMPLE_BYTES);
init_waitqueue_head(&sbp.tb_sync);
init_waitqueue_head(&sbp.tb_read);
mutex_init(&sbp.lock);
sbp.open = SB_OPEN;
wmb();
out:
unlock_kernel();
return err;
return 0;
}
static int sbprof_tb_release(struct inode *inode, struct file *filp)
@ -440,7 +435,7 @@ static int sbprof_tb_release(struct inode *inode, struct file *filp)
int minor;
minor = iminor(inode);
if (minor != 0 || !sbp.open)
if (minor != 0 || sbp.open != SB_CLOSED)
return -ENODEV;
mutex_lock(&sbp.lock);
@ -449,7 +444,8 @@ static int sbprof_tb_release(struct inode *inode, struct file *filp)
sbprof_zbprof_stop();
vfree(sbp.sbprof_tbbuf);
sbp.open = 0;
sbp.open = SB_CLOSED;
wmb();
mutex_unlock(&sbp.lock);
@ -583,7 +579,8 @@ static int __init sbprof_tb_init(void)
}
tb_dev = dev;
sbp.open = 0;
sbp.open = SB_CLOSED;
wmb();
tb_period = zbbus_mhz * 10000LL;
pr_info(DEVNAME ": initialized - tb_period = %lld\n",
(long long) tb_period);

View file

@ -106,7 +106,7 @@ void read_persistent_clock(struct timespec *ts)
break;
}
ts->tv_sec = sec;
tv->tv_nsec = 0;
ts->tv_nsec = 0;
}
int rtc_mips_set_time(unsigned long sec)

View file

@ -129,42 +129,47 @@ extern int fixup_exception(struct pt_regs *regs);
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct *)(x))
#define __get_user_nocheck(x, ptr, size) \
({ \
__typeof(*(ptr)) __gu_val; \
unsigned long __gu_addr; \
int __gu_err; \
__gu_addr = (unsigned long) (ptr); \
switch (size) { \
case 1: __get_user_asm("bu"); break; \
case 2: __get_user_asm("hu"); break; \
case 4: __get_user_asm("" ); break; \
default: __get_user_unknown(); break; \
} \
x = (__typeof__(*(ptr))) __gu_val; \
__gu_err; \
#define __get_user_nocheck(x, ptr, size) \
({ \
unsigned long __gu_addr; \
int __gu_err; \
__gu_addr = (unsigned long) (ptr); \
switch (size) { \
case 1: { \
unsigned char __gu_val; \
__get_user_asm("bu"); \
(x) = *(__force __typeof__(*(ptr))*) &__gu_val; \
break; \
} \
case 2: { \
unsigned short __gu_val; \
__get_user_asm("hu"); \
(x) = *(__force __typeof__(*(ptr))*) &__gu_val; \
break; \
} \
case 4: { \
unsigned int __gu_val; \
__get_user_asm(""); \
(x) = *(__force __typeof__(*(ptr))*) &__gu_val; \
break; \
} \
default: \
__get_user_unknown(); \
break; \
} \
__gu_err; \
})
#define __get_user_check(x, ptr, size) \
({ \
__typeof__(*(ptr)) __gu_val; \
unsigned long __gu_addr; \
int __gu_err; \
__gu_addr = (unsigned long) (ptr); \
if (likely(__access_ok(__gu_addr,size))) { \
switch (size) { \
case 1: __get_user_asm("bu"); break; \
case 2: __get_user_asm("hu"); break; \
case 4: __get_user_asm("" ); break; \
default: __get_user_unknown(); break; \
} \
} \
else { \
__gu_err = -EFAULT; \
__gu_val = 0; \
} \
x = (__typeof__(*(ptr))) __gu_val; \
__gu_err; \
#define __get_user_check(x, ptr, size) \
({ \
int _e; \
if (likely(__access_ok((unsigned long) (ptr), (size)))) \
_e = __get_user_nocheck((x), (ptr), (size)); \
else { \
_e = -EFAULT; \
(x) = (__typeof__(x))0; \
} \
_e; \
})
#define __get_user_asm(INSN) \

View file

@ -20,9 +20,9 @@ extern unsigned long mn10300_ioclk; /* IOCLK (crystal speed) in HZ */
extern unsigned long mn10300_iobclk;
extern unsigned long mn10300_tsc_per_HZ;
#define MN10300_IOCLK ((unsigned long)mn10300_ioclk)
#define MN10300_IOCLK mn10300_ioclk
/* If this processors has a another clock, uncomment the below. */
/* #define MN10300_IOBCLK ((unsigned long)mn10300_iobclk) */
/* #define MN10300_IOBCLK mn10300_iobclk */
#else /* !CONFIG_MN10300_RTC */
@ -35,7 +35,7 @@ extern unsigned long mn10300_tsc_per_HZ;
#define MN10300_TSCCLK MN10300_IOCLK
#ifdef CONFIG_MN10300_RTC
#define MN10300_TSC_PER_HZ ((unsigned long)mn10300_tsc_per_HZ)
#define MN10300_TSC_PER_HZ mn10300_tsc_per_HZ
#else /* !CONFIG_MN10300_RTC */
#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ)
#endif /* !CONFIG_MN10300_RTC */

View file

@ -20,9 +20,9 @@ extern unsigned long mn10300_ioclk; /* IOCLK (crystal speed) in HZ */
extern unsigned long mn10300_iobclk;
extern unsigned long mn10300_tsc_per_HZ;
#define MN10300_IOCLK ((unsigned long)mn10300_ioclk)
#define MN10300_IOCLK mn10300_ioclk
/* If this processors has a another clock, uncomment the below. */
/* #define MN10300_IOBCLK ((unsigned long)mn10300_iobclk) */
/* #define MN10300_IOBCLK mn10300_iobclk */
#else /* !CONFIG_MN10300_RTC */
@ -35,7 +35,7 @@ extern unsigned long mn10300_tsc_per_HZ;
#define MN10300_TSCCLK MN10300_IOCLK
#ifdef CONFIG_MN10300_RTC
#define MN10300_TSC_PER_HZ ((unsigned long)mn10300_tsc_per_HZ)
#define MN10300_TSC_PER_HZ mn10300_tsc_per_HZ
#else /* !CONFIG_MN10300_RTC */
#define MN10300_TSC_PER_HZ (MN10300_TSCCLK/HZ)
#endif /* !CONFIG_MN10300_RTC */

View file

@ -201,7 +201,7 @@ static int kvmppc_exit_timing_open(struct inode *inode, struct file *file)
return single_open(file, kvmppc_exit_timing_show, inode->i_private);
}
static struct file_operations kvmppc_exit_timing_fops = {
static const struct file_operations kvmppc_exit_timing_fops = {
.owner = THIS_MODULE,
.open = kvmppc_exit_timing_open,
.read = seq_read,

View file

@ -147,7 +147,7 @@ static int __fops ## _open(struct inode *inode, struct file *file) \
__simple_attr_check_format(__fmt, 0ull); \
return spufs_attr_open(inode, file, __get, __set, __fmt); \
} \
static struct file_operations __fops = { \
static const struct file_operations __fops = { \
.owner = THIS_MODULE, \
.open = __fops ## _open, \
.release = spufs_attr_release, \

View file

@ -209,7 +209,7 @@ static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len,
return n_read * sizeof(struct dtl_entry);
}
static struct file_operations dtl_fops = {
static const struct file_operations dtl_fops = {
.open = dtl_file_open,
.release = dtl_file_release,
.read = dtl_file_read,

View file

@ -102,6 +102,9 @@ config HAVE_SETUP_PER_CPU_AREA
config NEED_PER_CPU_EMBED_FIRST_CHUNK
def_bool y if SPARC64
config NEED_PER_CPU_PAGE_FIRST_CHUNK
def_bool y if SPARC64
config GENERIC_HARDIRQS_NO__DO_IRQ
bool
def_bool y if SPARC64

View file

@ -1420,7 +1420,7 @@ static void __init pcpu_free_bootmem(void *ptr, size_t size)
free_bootmem(__pa(ptr), size);
}
static int pcpu_cpu_distance(unsigned int from, unsigned int to)
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
if (cpu_to_node(from) == cpu_to_node(to))
return LOCAL_DISTANCE;
@ -1428,18 +1428,53 @@ static int pcpu_cpu_distance(unsigned int from, unsigned int to)
return REMOTE_DISTANCE;
}
static void __init pcpu_populate_pte(unsigned long addr)
{
pgd_t *pgd = pgd_offset_k(addr);
pud_t *pud;
pmd_t *pmd;
pud = pud_offset(pgd, addr);
if (pud_none(*pud)) {
pmd_t *new;
new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
pud_populate(&init_mm, pud, new);
}
pmd = pmd_offset(pud, addr);
if (!pmd_present(*pmd)) {
pte_t *new;
new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
pmd_populate_kernel(&init_mm, pmd, new);
}
}
void __init setup_per_cpu_areas(void)
{
unsigned long delta;
unsigned int cpu;
int rc;
int rc = -EINVAL;
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
PERCPU_DYNAMIC_RESERVE, 4 << 20,
pcpu_cpu_distance, pcpu_alloc_bootmem,
pcpu_free_bootmem);
if (rc)
panic("failed to initialize first chunk (%d)", rc);
if (pcpu_chosen_fc != PCPU_FC_PAGE) {
rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
PERCPU_DYNAMIC_RESERVE, 4 << 20,
pcpu_cpu_distance,
pcpu_alloc_bootmem,
pcpu_free_bootmem);
if (rc)
pr_warning("PERCPU: %s allocator failed (%d), "
"falling back to page size\n",
pcpu_fc_names[pcpu_chosen_fc], rc);
}
if (rc < 0)
rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
pcpu_alloc_bootmem,
pcpu_free_bootmem,
pcpu_populate_pte);
if (rc < 0)
panic("cannot initialize percpu area (err=%d)", rc);
delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
for_each_possible_cpu(cpu)

View file

@ -161,7 +161,8 @@ static inline __sum16 csum_ipv6_magic(const struct in6_addr *saddr,
"adcl $0, %0 ;\n"
: "=&r" (sum)
: "r" (saddr), "r" (daddr),
"r" (htonl(len)), "r" (htonl(proto)), "0" (sum));
"r" (htonl(len)), "r" (htonl(proto)), "0" (sum)
: "memory");
return csum_fold(sum);
}

View file

@ -312,19 +312,23 @@ static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
extern unsigned long long cmpxchg_486_u64(volatile void *, u64, u64);
#define cmpxchg64(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
if (likely(boot_cpu_data.x86 > 4)) \
__ret = (__typeof__(*(ptr)))__cmpxchg64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
else \
__ret = (__typeof__(*(ptr)))cmpxchg_486_u64((ptr), \
(unsigned long long)(o), \
(unsigned long long)(n)); \
__ret; \
})
#define cmpxchg64(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (o); \
__typeof__(*(ptr)) __new = (n); \
alternative_io("call cmpxchg8b_emu", \
"lock; cmpxchg8b (%%esi)" , \
X86_FEATURE_CX8, \
"=A" (__ret), \
"S" ((ptr)), "0" (__old), \
"b" ((unsigned int)__new), \
"c" ((unsigned int)(__new>>32)) \
: "memory"); \
__ret; })
#define cmpxchg64_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \

View file

@ -204,10 +204,7 @@ static void print_mce_head(void)
static void print_mce_tail(void)
{
printk(KERN_EMERG "This is not a software problem!\n"
#if (!defined(CONFIG_EDAC) || !defined(CONFIG_CPU_SUP_AMD))
"Run through mcelog --ascii to decode and contact your hardware vendor\n"
#endif
);
"Run through mcelog --ascii to decode and contact your hardware vendor\n");
}
#define PANIC_TIMEOUT 5 /* 5 seconds */

View file

@ -10,6 +10,14 @@
EXPORT_SYMBOL(mcount);
#endif
/*
* Note, this is a prototype to get at the symbol for
* the export, but dont use it from C code, it is used
* by assembly code and is not using C calling convention!
*/
extern void cmpxchg8b_emu(void);
EXPORT_SYMBOL(cmpxchg8b_emu);
/* Networking helper routines. */
EXPORT_SYMBOL(csum_partial_copy_generic);

View file

@ -15,7 +15,7 @@ ifeq ($(CONFIG_X86_32),y)
obj-y += atomic64_32.o
lib-y += checksum_32.o
lib-y += strstr_32.o
lib-y += semaphore_32.o string_32.o
lib-y += semaphore_32.o string_32.o cmpxchg8b_emu.o
lib-$(CONFIG_X86_USE_3DNOW) += mmx_32.o
else

View file

@ -0,0 +1,57 @@
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*
*/
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/frame.h>
#include <asm/dwarf2.h>
.text
/*
* Inputs:
* %esi : memory location to compare
* %eax : low 32 bits of old value
* %edx : high 32 bits of old value
* %ebx : low 32 bits of new value
* %ecx : high 32 bits of new value
*/
ENTRY(cmpxchg8b_emu)
CFI_STARTPROC
#
# Emulate 'cmpxchg8b (%esi)' on UP except we don't
# set the whole ZF thing (caller will just compare
# eax:edx with the expected value)
#
cmpxchg8b_emu:
pushfl
cli
cmpl (%esi), %eax
jne not_same
cmpl 4(%esi), %edx
jne half_same
movl %ebx, (%esi)
movl %ecx, 4(%esi)
popfl
ret
not_same:
movl (%esi), %eax
half_same:
movl 4(%esi), %edx
popfl
ret
CFI_ENDPROC
ENDPROC(cmpxchg8b_emu)

View file

@ -100,7 +100,7 @@ static int xen_array_release(struct inode *inode, struct file *file)
return 0;
}
static struct file_operations u32_array_fops = {
static const struct file_operations u32_array_fops = {
.owner = THIS_MODULE,
.open = u32_array_open,
.release= xen_array_release,

View file

@ -232,10 +232,8 @@ static int ec_poll(struct acpi_ec *ec)
}
advance_transaction(ec, acpi_ec_read_status(ec));
} while (time_before(jiffies, delay));
if (!ec->curr->irq_count ||
(acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF))
if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
break;
/* try restart command if we get any false interrupts */
pr_debug(PREFIX "controller reset, restart transaction\n");
spin_lock_irqsave(&ec->curr_lock, flags);
start_transaction(ec);

View file

@ -1052,6 +1052,8 @@ static void acpi_device_set_id(struct acpi_device *device)
device->flags.bus_address = 1;
}
kfree(info);
/*
* Some devices don't reliably have _HIDs & _CIDs, so add
* synthetic HIDs to make sure drivers can find them.
@ -1325,13 +1327,8 @@ static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
struct acpi_device **child)
{
acpi_status status;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
void *device = NULL;
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
printk(KERN_INFO PREFIX "Enumerating devices from [%s]\n",
(char *) buffer.pointer);
status = acpi_bus_check_add(handle, 0, ops, &device);
if (ACPI_SUCCESS(status))
acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,

View file

@ -285,7 +285,7 @@ static int acpi_video_device_brightness_open_fs(struct inode *inode,
struct file *file);
static ssize_t acpi_video_device_write_brightness(struct file *file,
const char __user *buffer, size_t count, loff_t *data);
static struct file_operations acpi_video_device_brightness_fops = {
static const struct file_operations acpi_video_device_brightness_fops = {
.owner = THIS_MODULE,
.open = acpi_video_device_brightness_open_fs,
.read = seq_read,

View file

@ -1306,14 +1306,6 @@ static void amb_close (struct atm_vcc * atm_vcc) {
return;
}
/********** Set socket options for a VC **********/
// int amb_getsockopt (struct atm_vcc * atm_vcc, int level, int optname, void * optval, int optlen);
/********** Set socket options for a VC **********/
// int amb_setsockopt (struct atm_vcc * atm_vcc, int level, int optname, void * optval, int optlen);
/********** Send **********/
static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {

View file

@ -2031,7 +2031,7 @@ static int eni_getsockopt(struct atm_vcc *vcc,int level,int optname,
static int eni_setsockopt(struct atm_vcc *vcc,int level,int optname,
void __user *optval,int optlen)
void __user *optval,unsigned int optlen)
{
return -EINVAL;
}

Просмотреть файл

@ -1244,7 +1244,7 @@ static int fs_getsockopt(struct atm_vcc *vcc,int level,int optname,
static int fs_setsockopt(struct atm_vcc *vcc,int level,int optname,
void __user *optval,int optlen)
void __user *optval,unsigned int optlen)
{
func_enter ();
func_exit ();

View file

@ -1795,7 +1795,7 @@ fore200e_getsockopt(struct atm_vcc* vcc, int level, int optname, void __user *op
static int
fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, int optlen)
fore200e_setsockopt(struct atm_vcc* vcc, int level, int optname, void __user *optval, unsigned int optlen)
{
/* struct fore200e* fore200e = FORE200E_DEV(vcc->dev); */

View file

@ -921,9 +921,9 @@ out_free_rbpq_base:
he_dev->rbrq_phys);
i = CONFIG_RBPL_SIZE;
out_free_rbpl_virt:
while (--i)
pci_pool_free(he_dev->rbps_pool, he_dev->rbpl_virt[i].virt,
he_dev->rbps_base[i].phys);
while (i--)
pci_pool_free(he_dev->rbpl_pool, he_dev->rbpl_virt[i].virt,
he_dev->rbpl_base[i].phys);
kfree(he_dev->rbpl_virt);
out_free_rbpl_base:
@ -933,11 +933,11 @@ out_free_rbpl_base:
out_destroy_rbpl_pool:
pci_pool_destroy(he_dev->rbpl_pool);
i = CONFIG_RBPL_SIZE;
i = CONFIG_RBPS_SIZE;
out_free_rbps_virt:
while (--i)
pci_pool_free(he_dev->rbpl_pool, he_dev->rbps_virt[i].virt,
he_dev->rbpl_base[i].phys);
while (i--)
pci_pool_free(he_dev->rbps_pool, he_dev->rbps_virt[i].virt,
he_dev->rbps_base[i].phys);
kfree(he_dev->rbps_virt);
out_free_rbps_base:

View file

@ -2590,7 +2590,7 @@ static int hrz_getsockopt (struct atm_vcc * atm_vcc, int level, int optname,
}
static int hrz_setsockopt (struct atm_vcc * atm_vcc, int level, int optname,
void *optval, int optlen) {
void *optval, unsigned int optlen) {
hrz_dev * dev = HRZ_DEV(atm_vcc->dev);
PRINTD (DBG_FLOW|DBG_VCC, "hrz_setsockopt");
switch (level) {

View file

@ -2862,7 +2862,7 @@ static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname,
}
static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname,
void __user *optval, int optlen)
void __user *optval, unsigned int optlen)
{
IF_EVENT(printk(">ia_setsockopt\n");)
return -EINVAL;

View file

@ -1517,7 +1517,7 @@ static int zatm_getsockopt(struct atm_vcc *vcc,int level,int optname,
static int zatm_setsockopt(struct atm_vcc *vcc,int level,int optname,
void __user *optval,int optlen)
void __user *optval,unsigned int optlen)
{
return -EINVAL;
}

View file

@ -426,7 +426,7 @@ out:
return err;
}
static struct file_operations cciss_proc_fops = {
static const struct file_operations cciss_proc_fops = {
.owner = THIS_MODULE,
.open = cciss_seq_open,
.read = seq_read,

View file

@ -393,7 +393,7 @@ static int apm_open(struct inode * inode, struct file * filp)
return as ? 0 : -ENOMEM;
}
static struct file_operations apm_bios_fops = {
static const struct file_operations apm_bios_fops = {
.owner = THIS_MODULE,
.read = apm_read,
.poll = apm_poll,

View file

@ -217,7 +217,7 @@ static long bfin_otp_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
# define bfin_otp_ioctl NULL
#endif
static struct file_operations bfin_otp_fops = {
static const struct file_operations bfin_otp_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = bfin_otp_ioctl,
.read = bfin_otp_read,

View file

@ -3354,7 +3354,7 @@ static int __init cy_detect_isa(void)
continue;
}
#ifdef MODULE
if (isparam && irq[i])
if (isparam && i < NR_CARDS && irq[i])
cy_isa_irq = irq[i];
else
#endif

View file

@ -116,7 +116,7 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
if (!res)
return -ENOENT;
mem = request_mem_region(res->start, res->end - res->start + 1,
mem = request_mem_region(res->start, resource_size(res),
pdev->name);
if (mem == NULL) {
ret = -EBUSY;
@ -124,7 +124,7 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
}
dev_set_drvdata(&pdev->dev, mem);
rng_base = ioremap(res->start, res->end - res->start + 1);
rng_base = ioremap(res->start, resource_size(res));
if (!rng_base) {
ret = -ENOMEM;
goto err_ioremap;

View file

@ -261,6 +261,9 @@ done:
return 0;
}
/* Traditional BSD devices */
#ifdef CONFIG_LEGACY_PTYS
static int pty_install(struct tty_driver *driver, struct tty_struct *tty)
{
struct tty_struct *o_tty;
@ -310,24 +313,6 @@ free_mem_out:
return -ENOMEM;
}
static const struct tty_operations pty_ops = {
.install = pty_install,
.open = pty_open,
.close = pty_close,
.write = pty_write,
.write_room = pty_write_room,
.flush_buffer = pty_flush_buffer,
.chars_in_buffer = pty_chars_in_buffer,
.unthrottle = pty_unthrottle,
.set_termios = pty_set_termios,
.resize = pty_resize
};
/* Traditional BSD devices */
#ifdef CONFIG_LEGACY_PTYS
static struct tty_driver *pty_driver, *pty_slave_driver;
static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg)
{
@ -341,7 +326,12 @@ static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file,
static int legacy_count = CONFIG_LEGACY_PTY_COUNT;
module_param(legacy_count, int, 0);
static const struct tty_operations pty_ops_bsd = {
/*
* The master side of a pty can do TIOCSPTLCK and thus
* has pty_bsd_ioctl.
*/
static const struct tty_operations master_pty_ops_bsd = {
.install = pty_install,
.open = pty_open,
.close = pty_close,
.write = pty_write,
@ -354,8 +344,23 @@ static const struct tty_operations pty_ops_bsd = {
.resize = pty_resize
};
static const struct tty_operations slave_pty_ops_bsd = {
.install = pty_install,
.open = pty_open,
.close = pty_close,
.write = pty_write,
.write_room = pty_write_room,
.flush_buffer = pty_flush_buffer,
.chars_in_buffer = pty_chars_in_buffer,
.unthrottle = pty_unthrottle,
.set_termios = pty_set_termios,
.resize = pty_resize
};
static void __init legacy_pty_init(void)
{
struct tty_driver *pty_driver, *pty_slave_driver;
if (legacy_count <= 0)
return;
@ -383,7 +388,7 @@ static void __init legacy_pty_init(void)
pty_driver->init_termios.c_ospeed = 38400;
pty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW;
pty_driver->other = pty_slave_driver;
tty_set_operations(pty_driver, &pty_ops);
tty_set_operations(pty_driver, &master_pty_ops_bsd);
pty_slave_driver->owner = THIS_MODULE;
pty_slave_driver->driver_name = "pty_slave";
@ -399,7 +404,7 @@ static void __init legacy_pty_init(void)
pty_slave_driver->flags = TTY_DRIVER_RESET_TERMIOS |
TTY_DRIVER_REAL_RAW;
pty_slave_driver->other = pty_driver;
tty_set_operations(pty_slave_driver, &pty_ops);
tty_set_operations(pty_slave_driver, &slave_pty_ops_bsd);
if (tty_register_driver(pty_driver))
panic("Couldn't register pty driver");


@ -220,8 +220,7 @@ static inline int serial_paranoia_check(struct cyclades_port *info, char *name,
return 1;
}
if ((long)info < (long)(&cy_port[0])
|| (long)(&cy_port[NR_PORTS]) < (long)info) {
if (info < &cy_port[0] || info >= &cy_port[NR_PORTS]) {
printk("Warning: cyclades_port out of range for (%s) in %s\n",
name, routine);
return 1;
@ -520,15 +519,13 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id)
panic("TxInt on debug port!!!");
}
#endif
info = &cy_port[channel];
/* validate the port number (as configured and open) */
if ((channel < 0) || (NR_PORTS <= channel)) {
base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
base_addr[CyTEOIR] = CyNOTRANS;
return IRQ_HANDLED;
}
info = &cy_port[channel];
info->last_active = jiffies;
if (info->tty == 0) {
base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);


@ -981,8 +981,10 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
goto eperm;
if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg,
sizeof(struct vt_setactivate)))
return -EFAULT;
sizeof(struct vt_setactivate))) {
ret = -EFAULT;
goto out;
}
if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
ret = -ENXIO;
else {


@ -559,7 +559,7 @@ static int hwicap_release(struct inode *inode, struct file *file)
return status;
}
static struct file_operations hwicap_fops = {
static const struct file_operations hwicap_fops = {
.owner = THIS_MODULE,
.write = hwicap_write,
.read = hwicap_read,


@ -1487,7 +1487,7 @@ static int gpiolib_open(struct inode *inode, struct file *file)
return single_open(file, gpiolib_show, NULL);
}
static struct file_operations gpiolib_operations = {
static const struct file_operations gpiolib_operations = {
.open = gpiolib_open,
.read = seq_read,
.llseek = seq_lseek,


@ -482,6 +482,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
list_for_each_entry_safe(mode, t, &connector->user_modes, head)
drm_mode_remove(connector, mode);
kfree(connector->fb_helper_private);
mutex_lock(&dev->mode_config.mutex);
drm_mode_object_put(dev, &connector->base);
list_del(&connector->head);


@ -32,6 +32,7 @@
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_fb_helper.h"
static void drm_mode_validate_flag(struct drm_connector *connector,
int flags)
@ -90,7 +91,15 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
list_for_each_entry_safe(mode, t, &connector->modes, head)
mode->status = MODE_UNVERIFIED;
connector->status = connector->funcs->detect(connector);
if (connector->force) {
if (connector->force == DRM_FORCE_ON)
connector->status = connector_status_connected;
else
connector->status = connector_status_disconnected;
if (connector->funcs->force)
connector->funcs->force(connector);
} else
connector->status = connector->funcs->detect(connector);
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("%s is disconnected\n",
@ -267,6 +276,65 @@ static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *con
return NULL;
}
static bool drm_has_cmdline_mode(struct drm_connector *connector)
{
struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
if (!fb_help_conn)
return false;
cmdline_mode = &fb_help_conn->cmdline_mode;
return cmdline_mode->specified;
}
static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
{
struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
struct drm_display_mode *mode = NULL;
if (!fb_help_conn)
return mode;
cmdline_mode = &fb_help_conn->cmdline_mode;
if (cmdline_mode->specified == false)
return mode;
/* attempt to find a matching mode in the list of modes
* we have gotten so far, if not add a CVT mode that conforms
*/
if (cmdline_mode->rb || cmdline_mode->margins)
goto create_mode;
list_for_each_entry(mode, &connector->modes, head) {
/* check width/height */
if (mode->hdisplay != cmdline_mode->xres ||
mode->vdisplay != cmdline_mode->yres)
continue;
if (cmdline_mode->refresh_specified) {
if (mode->vrefresh != cmdline_mode->refresh)
continue;
}
if (cmdline_mode->interlace) {
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
}
return mode;
}
create_mode:
mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
cmdline_mode->yres,
cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
cmdline_mode->rb, cmdline_mode->interlace,
cmdline_mode->margins);
list_add(&mode->head, &connector->modes);
return mode;
}
static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
bool enable;
@ -317,10 +385,16 @@ static bool drm_target_preferred(struct drm_device *dev,
continue;
}
DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
connector->base.id);
DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
connector->base.id);
modes[i] = drm_has_preferred_mode(connector, width, height);
/* go for a command line mode first */
modes[i] = drm_pick_cmdline_mode(connector, width, height);
if (!modes[i]) {
DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
connector->base.id);
modes[i] = drm_has_preferred_mode(connector, width, height);
}
/* No preferred modes, pick one off the list */
if (!modes[i] && !list_empty(&connector->modes)) {
list_for_each_entry(modes[i], &connector->modes, head)
@ -369,6 +443,8 @@ static int drm_pick_crtcs(struct drm_device *dev,
my_score = 1;
if (connector->status == connector_status_connected)
my_score++;
if (drm_has_cmdline_mode(connector))
my_score++;
if (drm_has_preferred_mode(connector, width, height))
my_score++;
@ -943,6 +1019,8 @@ bool drm_helper_initial_config(struct drm_device *dev)
{
int count = 0;
drm_fb_helper_parse_command_line(dev);
count = drm_helper_probe_connector_modes(dev,
dev->mode_config.max_width,
dev->mode_config.max_height);
@ -950,7 +1028,7 @@ bool drm_helper_initial_config(struct drm_device *dev)
/*
* we shouldn't end up with no modes here.
*/
WARN(!count, "Connected connector with 0 modes\n");
WARN(!count, "No connectors reported connected with modes\n");
drm_setup_crtcs(dev);


@ -109,7 +109,9 @@ static struct edid_quirk {
/* Valid EDID header has these bytes */
static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };
static const u8 edid_header[] = {
0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
};
/**
* edid_is_valid - sanity check EDID data
@ -500,6 +502,19 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
}
return mode;
}
/*
* 0 is reserved. The spec says 0x01 fill for unused timings. Some old
* monitors fill with ascii space (0x20) instead.
*/
static int
bad_std_timing(u8 a, u8 b)
{
return (a == 0x00 && b == 0x00) ||
(a == 0x01 && b == 0x01) ||
(a == 0x20 && b == 0x20);
}
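
Editorial aside, not part of the patch: the helper above treats a standard-timing slot as unused when both bytes carry one of the fill patterns named in the comment. A minimal stand-alone check under that same assumption:

#include <assert.h>

typedef unsigned char u8;

/* Copy of the helper above: 0x00/0x00, 0x01/0x01 and 0x20/0x20 are fill. */
static int bad_std_timing(u8 a, u8 b)
{
	return (a == 0x00 && b == 0x00) ||
	       (a == 0x01 && b == 0x01) ||
	       (a == 0x20 && b == 0x20);
}

int main(void)
{
	assert(bad_std_timing(0x01, 0x01));	/* spec fill byte */
	assert(bad_std_timing(0x20, 0x20));	/* ASCII-space fill on old monitors */
	assert(!bad_std_timing(0xd1, 0xc0));	/* a plausible real timing (1920x1080@60) */
	return 0;
}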
/**
* drm_mode_std - convert standard mode info (width, height, refresh) into mode
* @t: standard timing params
@ -513,6 +528,7 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
*/
struct drm_display_mode *drm_mode_std(struct drm_device *dev,
struct std_timing *t,
int revision,
int timing_level)
{
struct drm_display_mode *mode;
@ -523,14 +539,20 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
>> EDID_TIMING_VFREQ_SHIFT;
if (bad_std_timing(t->hsize, t->vfreq_aspect))
return NULL;
/* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
hsize = t->hsize * 8 + 248;
/* vrefresh_rate = vfreq + 60 */
vrefresh_rate = vfreq + 60;
/* the vdisplay is calculated based on the aspect ratio */
if (aspect_ratio == 0)
vsize = (hsize * 10) / 16;
else if (aspect_ratio == 1)
if (aspect_ratio == 0) {
if (revision < 3)
vsize = hsize;
else
vsize = (hsize * 10) / 16;
} else if (aspect_ratio == 1)
vsize = (hsize * 3) / 4;
else if (aspect_ratio == 2)
vsize = (hsize * 4) / 5;
@ -538,7 +560,8 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
vsize = (hsize * 9) / 16;
/* HDTV hack */
if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
false);
mode->hdisplay = 1366;
mode->vsync_start = mode->vsync_start - 1;
mode->vsync_end = mode->vsync_end - 1;
@ -557,7 +580,8 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
break;
case LEVEL_CVT:
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
false);
break;
}
return mode;
@ -779,7 +803,7 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
continue;
newmode = drm_mode_std(dev, &edid->standard_timings[i],
timing_level);
edid->revision, timing_level);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;
@ -829,13 +853,13 @@ static int add_detailed_info(struct drm_connector *connector,
case EDID_DETAIL_MONITOR_CPDATA:
break;
case EDID_DETAIL_STD_MODES:
/* Five modes per detailed section */
for (j = 0; j < 5; i++) {
for (j = 0; j < 6; i++) {
struct std_timing *std;
struct drm_display_mode *newmode;
std = &data->data.timings[j];
newmode = drm_mode_std(dev, std,
edid->revision,
timing_level);
if (newmode) {
drm_mode_probed_add(connector, newmode);
@ -964,7 +988,9 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
struct drm_display_mode *newmode;
std = &data->data.timings[j];
newmode = drm_mode_std(dev, std, timing_level);
newmode = drm_mode_std(dev, std,
edid->revision,
timing_level);
if (newmode) {
drm_mode_probed_add(connector, newmode);
modes++;


@ -40,6 +40,199 @@ MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
int drm_fb_helper_add_connector(struct drm_connector *connector)
{
connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
if (!connector->fb_helper_private)
return -ENOMEM;
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_add_connector);
static int my_atoi(const char *name)
{
int val = 0;
for (;; name++) {
switch (*name) {
case '0' ... '9':
val = 10*val+(*name-'0');
break;
default:
return val;
}
}
}
}
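
A quick editorial illustration, not from the patch: my_atoi() above accumulates base-10 digits and returns at the first non-digit, which is what lets the command-line parser below pull numbers out of the middle of a mode string. A stand-alone sanity check of that behaviour:

#include <assert.h>

/* Copy of the helper above, compiled stand-alone for the illustration. */
static int my_atoi(const char *name)
{
	int val = 0;

	for (;; name++) {
		switch (*name) {
		case '0' ... '9':	/* GCC case-range extension, as in the kernel */
			val = 10 * val + (*name - '0');
			break;
		default:
			return val;
		}
	}
}

int main(void)
{
	assert(my_atoi("1024x768") == 1024);	/* stops at 'x' */
	assert(my_atoi("768-16@60") == 768);	/* stops at '-' */
	assert(my_atoi("60") == 60);
	return 0;
}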
/**
* drm_fb_helper_connector_parse_command_line - parse command line for connector
* @connector - connector to parse line for
* @mode_option - per connector mode option
*
* This parses the connector specific then generic command lines for
* modes and options to configure the connector.
*
* This uses the same parameters as the fb modedb.c, except for extra
* <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
*
* enable/enable Digital/disable bit at the end
*/
static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
const char *mode_option)
{
const char *name;
unsigned int namelen;
int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
int i;
enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
if (!fb_help_conn)
return false;
cmdline_mode = &fb_help_conn->cmdline_mode;
if (!mode_option)
mode_option = fb_mode_option;
if (!mode_option) {
cmdline_mode->specified = false;
return false;
}
name = mode_option;
namelen = strlen(name);
for (i = namelen-1; i >= 0; i--) {
switch (name[i]) {
case '@':
namelen = i;
if (!refresh_specified && !bpp_specified &&
!yres_specified) {
refresh = my_atoi(&name[i+1]);
refresh_specified = 1;
if (cvt || rb)
cvt = 0;
} else
goto done;
break;
case '-':
namelen = i;
if (!bpp_specified && !yres_specified) {
bpp = my_atoi(&name[i+1]);
bpp_specified = 1;
if (cvt || rb)
cvt = 0;
} else
goto done;
break;
case 'x':
if (!yres_specified) {
yres = my_atoi(&name[i+1]);
yres_specified = 1;
} else
goto done;
case '0' ... '9':
break;
case 'M':
if (!yres_specified)
cvt = 1;
break;
case 'R':
if (!cvt)
rb = 1;
break;
case 'm':
if (!cvt)
margins = 1;
break;
case 'i':
if (!cvt)
interlace = 1;
break;
case 'e':
force = DRM_FORCE_ON;
break;
case 'D':
if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) ||
(connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
force = DRM_FORCE_ON;
else
force = DRM_FORCE_ON_DIGITAL;
break;
case 'd':
force = DRM_FORCE_OFF;
break;
default:
goto done;
}
}
if (i < 0 && yres_specified) {
xres = my_atoi(name);
res_specified = 1;
}
done:
DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
drm_get_connector_name(connector), xres, yres,
(refresh) ? refresh : 60, (rb) ? " reduced blanking" :
"", (margins) ? " with margins" : "", (interlace) ?
" interlaced" : "");
if (force) {
const char *s;
switch (force) {
case DRM_FORCE_OFF: s = "OFF"; break;
case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
default:
case DRM_FORCE_ON: s = "ON"; break;
}
DRM_INFO("forcing %s connector %s\n",
drm_get_connector_name(connector), s);
connector->force = force;
}
if (res_specified) {
cmdline_mode->specified = true;
cmdline_mode->xres = xres;
cmdline_mode->yres = yres;
}
if (refresh_specified) {
cmdline_mode->refresh_specified = true;
cmdline_mode->refresh = refresh;
}
if (bpp_specified) {
cmdline_mode->bpp_specified = true;
cmdline_mode->bpp = bpp;
}
cmdline_mode->rb = rb ? true : false;
cmdline_mode->cvt = cvt ? true : false;
cmdline_mode->interlace = interlace ? true : false;
return true;
}
int drm_fb_helper_parse_command_line(struct drm_device *dev)
{
struct drm_connector *connector;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
char *option = NULL;
/* do something on return - turn off connector maybe */
if (fb_get_options(drm_get_connector_name(connector), &option))
continue;
drm_fb_helper_connector_parse_command_line(connector, option);
}
return 0;
}
bool drm_fb_helper_force_kernel_mode(void)
{
int i = 0;
@ -87,6 +280,7 @@ void drm_fb_helper_restore(void)
}
EXPORT_SYMBOL(drm_fb_helper_restore);
#ifdef CONFIG_MAGIC_SYSRQ
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
drm_fb_helper_restore();
@ -103,6 +297,7 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
.help_msg = "force-fb(V)",
.action_msg = "Restore framebuffer console",
};
#endif
static void drm_fb_helper_on(struct fb_info *info)
{
@ -484,6 +679,8 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
uint32_t fb_height,
uint32_t surface_width,
uint32_t surface_height,
uint32_t surface_depth,
uint32_t surface_bpp,
struct drm_framebuffer **fb_ptr))
{
struct drm_crtc *crtc;
@ -497,8 +694,43 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
struct drm_framebuffer *fb;
struct drm_mode_set *modeset = NULL;
struct drm_fb_helper *fb_helper;
uint32_t surface_depth = 24, surface_bpp = 32;
/* first up get a count of crtcs now in use and new min/maxes width/heights */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
struct drm_fb_helper_cmdline_mode *cmdline_mode;
if (!fb_help_conn)
continue;
cmdline_mode = &fb_help_conn->cmdline_mode;
if (cmdline_mode->bpp_specified) {
switch (cmdline_mode->bpp) {
case 8:
surface_depth = surface_bpp = 8;
break;
case 15:
surface_depth = 15;
surface_bpp = 16;
break;
case 16:
surface_depth = surface_bpp = 16;
break;
case 24:
surface_depth = surface_bpp = 24;
break;
case 32:
surface_depth = 24;
surface_bpp = 32;
break;
}
break;
}
}
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (drm_helper_crtc_in_use(crtc)) {
if (crtc->desired_mode) {
@ -527,7 +759,8 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
/* do we have an fb already? */
if (list_empty(&dev->mode_config.fb_kernel_list)) {
ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
surface_height, &fb);
surface_height, surface_depth, surface_bpp,
&fb);
if (ret)
return -EINVAL;
new_fb = 1;


@ -88,7 +88,7 @@ EXPORT_SYMBOL(drm_mode_debug_printmodeline);
#define HV_FACTOR 1000
struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
int vdisplay, int vrefresh,
bool reduced, bool interlaced)
bool reduced, bool interlaced, bool margins)
{
/* 1) top/bottom margin size (% of height) - default: 1.8, */
#define CVT_MARGIN_PERCENTAGE 18
@ -101,7 +101,6 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
/* Pixel Clock step (kHz) */
#define CVT_CLOCK_STEP 250
struct drm_display_mode *drm_mode;
bool margins = false;
unsigned int vfieldrate, hperiod;
int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
int interlace;


@ -110,6 +110,7 @@ EXPORT_SYMBOL(intelfb_resize);
static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
uint32_t fb_height, uint32_t surface_width,
uint32_t surface_height,
uint32_t surface_depth, uint32_t surface_bpp,
struct drm_framebuffer **fb_p)
{
struct fb_info *info;
@ -125,9 +126,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
mode_cmd.width = surface_width;
mode_cmd.height = surface_height;
mode_cmd.bpp = 32;
mode_cmd.bpp = surface_bpp;
mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
mode_cmd.depth = 24;
mode_cmd.depth = surface_depth;
size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);

3 drivers/gpu/drm/radeon/.gitignore vendored Normal file

@ -0,0 +1,3 @@
mkregtable
*_reg_safe.h


@ -57,13 +57,4 @@
#define VGA_RENDER_CONTROL 0x0300
#define VGA_VSTATUS_CNTL_MASK 0x00030000
/* AVIVO disable VGA rendering */
static inline void radeon_avivo_vga_render_disable(struct radeon_device *rdev)
{
u32 vga_render;
vga_render = RREG32(VGA_RENDER_CONTROL);
vga_render &= ~VGA_VSTATUS_CNTL_MASK;
WREG32(VGA_RENDER_CONTROL, vga_render);
}
#endif


@ -863,13 +863,11 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
void r100_cs_dump_packet(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
struct radeon_cs_chunk *ib_chunk;
volatile uint32_t *ib;
unsigned i;
unsigned idx;
ib = p->ib->ptr;
ib_chunk = &p->chunks[p->chunk_ib_idx];
idx = pkt->idx;
for (i = 0; i <= (pkt->count + 1); i++, idx++) {
DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@ -896,7 +894,7 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
idx, ib_chunk->length_dw);
return -EINVAL;
}
header = ib_chunk->kdata[idx];
header = radeon_get_ib_value(p, idx);
pkt->idx = idx;
pkt->type = CP_PACKET_GET_TYPE(header);
pkt->count = CP_PACKET_GET_COUNT(header);
@ -939,7 +937,6 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
*/
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
struct radeon_cs_chunk *ib_chunk;
struct drm_mode_object *obj;
struct drm_crtc *crtc;
struct radeon_crtc *radeon_crtc;
@ -947,8 +944,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
int crtc_id;
int r;
uint32_t header, h_idx, reg;
volatile uint32_t *ib;
ib_chunk = &p->chunks[p->chunk_ib_idx];
ib = p->ib->ptr;
/* parse the wait until */
r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@ -963,24 +961,24 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
return r;
}
if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) {
if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
DRM_ERROR("vline wait had illegal wait until\n");
r = -EINVAL;
return r;
}
/* jump over the NOP */
r = r100_cs_packet_parse(p, &p3reloc, p->idx);
r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
if (r)
return r;
h_idx = p->idx - 2;
p->idx += waitreloc.count;
p->idx += p3reloc.count;
p->idx += waitreloc.count + 2;
p->idx += p3reloc.count + 2;
header = ib_chunk->kdata[h_idx];
crtc_id = ib_chunk->kdata[h_idx + 5];
reg = ib_chunk->kdata[h_idx] >> 2;
header = radeon_get_ib_value(p, h_idx);
crtc_id = radeon_get_ib_value(p, h_idx + 5);
reg = header >> 2;
mutex_lock(&p->rdev->ddev->mode_config.mutex);
obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
if (!obj) {
@ -994,16 +992,16 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
if (!crtc->enabled) {
/* if the CRTC isn't enabled - we need to nop out the wait until */
ib_chunk->kdata[h_idx + 2] = PACKET2(0);
ib_chunk->kdata[h_idx + 3] = PACKET2(0);
ib[h_idx + 2] = PACKET2(0);
ib[h_idx + 3] = PACKET2(0);
} else if (crtc_id == 1) {
switch (reg) {
case AVIVO_D1MODE_VLINE_START_END:
header &= R300_CP_PACKET0_REG_MASK;
header &= ~R300_CP_PACKET0_REG_MASK;
header |= AVIVO_D2MODE_VLINE_START_END >> 2;
break;
case RADEON_CRTC_GUI_TRIG_VLINE:
header &= R300_CP_PACKET0_REG_MASK;
header &= ~R300_CP_PACKET0_REG_MASK;
header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
break;
default:
@ -1011,8 +1009,8 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
r = -EINVAL;
goto out;
}
ib_chunk->kdata[h_idx] = header;
ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
ib[h_idx] = header;
ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
}
out:
mutex_unlock(&p->rdev->ddev->mode_config.mutex);
@ -1033,7 +1031,6 @@ out:
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
struct radeon_cs_reloc **cs_reloc)
{
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_chunk *relocs_chunk;
struct radeon_cs_packet p3reloc;
unsigned idx;
@ -1044,7 +1041,6 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
return -EINVAL;
}
*cs_reloc = NULL;
ib_chunk = &p->chunks[p->chunk_ib_idx];
relocs_chunk = &p->chunks[p->chunk_relocs_idx];
r = r100_cs_packet_parse(p, &p3reloc, p->idx);
if (r) {
@ -1057,7 +1053,7 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, &p3reloc);
return -EINVAL;
}
idx = ib_chunk->kdata[p3reloc.idx + 1];
idx = radeon_get_ib_value(p, p3reloc.idx + 1);
if (idx >= relocs_chunk->length_dw) {
DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
idx, relocs_chunk->length_dw);
@ -1126,7 +1122,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
@ -1134,11 +1129,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
int r;
int i, face;
u32 tile_flags = 0;
u32 idx_value;
ib = p->ib->ptr;
ib_chunk = &p->chunks[p->chunk_ib_idx];
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
switch (reg) {
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
@ -1166,8 +1163,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r;
}
track->zb.robj = reloc->robj;
track->zb.offset = ib_chunk->kdata[idx];
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
track->zb.offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
r = r100_cs_packet_next_reloc(p, &reloc);
@ -1178,8 +1175,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
return r;
}
track->cb[0].robj = reloc->robj;
track->cb[0].offset = ib_chunk->kdata[idx];
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
track->cb[0].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_PP_TXOFFSET_0:
case RADEON_PP_TXOFFSET_1:
@ -1192,7 +1189,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
break;
case RADEON_PP_CUBIC_OFFSET_T0_0:
@ -1208,8 +1205,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx];
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
track->textures[0].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[0].cube_info[i].robj = reloc->robj;
break;
case RADEON_PP_CUBIC_OFFSET_T1_0:
@ -1225,8 +1222,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx];
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
track->textures[1].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[1].cube_info[i].robj = reloc->robj;
break;
case RADEON_PP_CUBIC_OFFSET_T2_0:
@ -1242,12 +1239,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx];
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
track->textures[2].cube_info[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[2].cube_info[i].robj = reloc->robj;
break;
case RADEON_RE_WIDTH_HEIGHT:
track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF);
track->maxy = ((idx_value >> 16) & 0x7FF);
break;
case RADEON_RB3D_COLORPITCH:
r = r100_cs_packet_next_reloc(p, &reloc);
@ -1263,17 +1260,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK;
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
break;
case RADEON_RB3D_DEPTHPITCH:
track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK;
track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
break;
case RADEON_RB3D_CNTL:
switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
case 7:
case 8:
case 9:
@ -1291,13 +1288,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
break;
default:
DRM_ERROR("Invalid color buffer format (%d) !\n",
((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
return -EINVAL;
}
track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE);
track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
break;
case RADEON_RB3D_ZSTENCILCNTL:
switch (ib_chunk->kdata[idx] & 0xf) {
switch (idx_value & 0xf) {
case 0:
track->zb.cpp = 2;
break;
@ -1321,44 +1318,44 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_PP_CNTL:
{
uint32_t temp = ib_chunk->kdata[idx] >> 4;
uint32_t temp = idx_value >> 4;
for (i = 0; i < track->num_texture; i++)
track->textures[i].enabled = !!(temp & (1 << i));
}
break;
case RADEON_SE_VF_CNTL:
track->vap_vf_cntl = ib_chunk->kdata[idx];
track->vap_vf_cntl = idx_value;
break;
case RADEON_SE_VTX_FMT:
track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]);
track->vtx_size = r100_get_vtx_size(idx_value);
break;
case RADEON_PP_TEX_SIZE_0:
case RADEON_PP_TEX_SIZE_1:
case RADEON_PP_TEX_SIZE_2:
i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1;
track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
break;
case RADEON_PP_TEX_PITCH_0:
case RADEON_PP_TEX_PITCH_1:
case RADEON_PP_TEX_PITCH_2:
i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
track->textures[i].pitch = ib_chunk->kdata[idx] + 32;
track->textures[i].pitch = idx_value + 32;
break;
case RADEON_PP_TXFILTER_0:
case RADEON_PP_TXFILTER_1:
case RADEON_PP_TXFILTER_2:
i = (reg - RADEON_PP_TXFILTER_0) / 24;
track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK)
track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
>> RADEON_MAX_MIP_LEVEL_SHIFT);
tmp = (ib_chunk->kdata[idx] >> 23) & 0x7;
tmp = (idx_value >> 23) & 0x7;
if (tmp == 2 || tmp == 6)
track->textures[i].roundup_w = false;
tmp = (ib_chunk->kdata[idx] >> 27) & 0x7;
tmp = (idx_value >> 27) & 0x7;
if (tmp == 2 || tmp == 6)
track->textures[i].roundup_h = false;
break;
@ -1366,16 +1363,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
case RADEON_PP_TXFORMAT_1:
case RADEON_PP_TXFORMAT_2:
i = (reg - RADEON_PP_TXFORMAT_0) / 24;
if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) {
if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
track->textures[i].use_pitch = 1;
} else {
track->textures[i].use_pitch = 0;
track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
}
if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
track->textures[i].tex_coord_type = 2;
switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) {
switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
case RADEON_TXFORMAT_I8:
case RADEON_TXFORMAT_RGB332:
case RADEON_TXFORMAT_Y8:
@ -1402,13 +1399,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
track->textures[i].cpp = 4;
break;
}
track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf);
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
break;
case RADEON_PP_CUBIC_FACES_0:
case RADEON_PP_CUBIC_FACES_1:
case RADEON_PP_CUBIC_FACES_2:
tmp = ib_chunk->kdata[idx];
tmp = idx_value;
i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
for (face = 0; face < 4; face++) {
track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
@ -1427,15 +1424,14 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
struct radeon_object *robj)
{
struct radeon_cs_chunk *ib_chunk;
unsigned idx;
ib_chunk = &p->chunks[p->chunk_ib_idx];
u32 value;
idx = pkt->idx + 1;
if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) {
value = radeon_get_ib_value(p, idx + 2);
if ((value + 1) > radeon_object_size(robj)) {
DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
"(need %u have %lu) !\n",
ib_chunk->kdata[idx+2] + 1,
value + 1,
radeon_object_size(robj));
return -EINVAL;
}
@ -1445,59 +1441,20 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
static int r100_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
unsigned idx;
unsigned i, c;
volatile uint32_t *ib;
int r;
ib = p->ib->ptr;
ib_chunk = &p->chunks[p->chunk_ib_idx];
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
switch (pkt->opcode) {
case PACKET3_3D_LOAD_VBPNTR:
c = ib_chunk->kdata[idx++];
track->num_arrays = c;
for (i = 0; i < (c - 1); i += 2, idx += 3) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
track->arrays[i + 0].esize &= 0x7F;
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 1].robj = reloc->robj;
track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
track->arrays[i + 1].esize &= 0x7F;
}
if (c & 1) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
track->arrays[i + 0].esize &= 0x7F;
}
r = r100_packet3_load_vbpntr(p, pkt, idx);
if (r)
return r;
break;
case PACKET3_INDX_BUFFER:
r = r100_cs_packet_next_reloc(p, &reloc);
@ -1506,7 +1463,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
if (r) {
return r;
@ -1520,27 +1477,27 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
track->num_arrays = 1;
track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]);
track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
track->arrays[0].robj = reloc->robj;
track->arrays[0].esize = track->vtx_size;
track->max_indx = ib_chunk->kdata[idx+1];
track->max_indx = radeon_get_ib_value(p, idx+1);
track->vap_vf_cntl = ib_chunk->kdata[idx+3];
track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
track->immd_dwords = pkt->count - 1;
r = r100_cs_track_check(p->rdev, track);
if (r)
return r;
break;
case PACKET3_3D_DRAW_IMMD:
if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vap_vf_cntl = ib_chunk->kdata[idx+1];
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
track->immd_dwords = pkt->count - 1;
r = r100_cs_track_check(p->rdev, track);
if (r)
@ -1548,11 +1505,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
break;
/* triggers drawing using in-packet vertex data */
case PACKET3_3D_DRAW_IMMD_2:
if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vap_vf_cntl = ib_chunk->kdata[idx];
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
track->immd_dwords = pkt->count;
r = r100_cs_track_check(p->rdev, track);
if (r)
@ -1560,28 +1517,28 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
break;
/* triggers drawing using in-packet vertex data */
case PACKET3_3D_DRAW_VBUF_2:
track->vap_vf_cntl = ib_chunk->kdata[idx];
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
r = r100_cs_track_check(p->rdev, track);
if (r)
return r;
break;
/* triggers drawing of vertex buffers setup elsewhere */
case PACKET3_3D_DRAW_INDX_2:
track->vap_vf_cntl = ib_chunk->kdata[idx];
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
r = r100_cs_track_check(p->rdev, track);
if (r)
return r;
break;
/* triggers drawing using indices to vertex buffer */
case PACKET3_3D_DRAW_VBUF:
track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
r = r100_cs_track_check(p->rdev, track);
if (r)
return r;
break;
/* triggers drawing of vertex buffers setup elsewhere */
case PACKET3_3D_DRAW_INDX:
track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
r = r100_cs_track_check(p->rdev, track);
if (r)
return r;


@ -84,6 +84,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg);
static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx,
@ -93,9 +95,7 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
u32 tile_flags = 0;
u32 tmp;
struct radeon_cs_reloc *reloc;
struct radeon_cs_chunk *ib_chunk;
ib_chunk = &p->chunks[p->chunk_ib_idx];
u32 value;
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
@ -104,7 +104,8 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
tmp = ib_chunk->kdata[idx] & 0x003fffff;
value = radeon_get_ib_value(p, idx);
tmp = value & 0x003fffff;
tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
@ -119,6 +120,64 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
}
tmp |= tile_flags;
p->ib->ptr[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
return 0;
}
static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
int idx)
{
unsigned c, i;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
int r = 0;
volatile uint32_t *ib;
u32 idx_value;
ib = p->ib->ptr;
track = (struct r100_cs_track *)p->track;
c = radeon_get_ib_value(p, idx++) & 0x1F;
track->num_arrays = c;
for (i = 0; i < (c - 1); i+=2, idx+=3) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize &= 0x7F;
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 1].robj = reloc->robj;
track->arrays[i + 1].esize = idx_value >> 24;
track->arrays[i + 1].esize &= 0x7F;
}
if (c & 1) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
idx_value = radeon_get_ib_value(p, idx);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = idx_value >> 8;
track->arrays[i + 0].esize &= 0x7F;
}
return r;
}


@ -96,7 +96,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
@ -105,11 +104,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
int i;
int face;
u32 tile_flags = 0;
u32 idx_value;
ib = p->ib->ptr;
ib_chunk = &p->chunks[p->chunk_ib_idx];
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
switch (reg) {
case RADEON_CRTC_GUI_TRIG_VLINE:
r = r100_cs_packet_parse_vline(p);
@ -137,8 +136,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r;
}
track->zb.robj = reloc->robj;
track->zb.offset = ib_chunk->kdata[idx];
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
track->zb.offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_RB3D_COLOROFFSET:
r = r100_cs_packet_next_reloc(p, &reloc);
@ -149,8 +148,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
return r;
}
track->cb[0].robj = reloc->robj;
track->cb[0].offset = ib_chunk->kdata[idx];
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
track->cb[0].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R200_PP_TXOFFSET_0:
case R200_PP_TXOFFSET_1:
@ -166,7 +165,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
break;
case R200_PP_CUBIC_OFFSET_F1_0:
@ -208,12 +207,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
track->textures[i].cube_info[face - 1].offset = ib_chunk->kdata[idx];
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
track->textures[i].cube_info[face - 1].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].cube_info[face - 1].robj = reloc->robj;
break;
case RADEON_RE_WIDTH_HEIGHT:
track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF);
track->maxy = ((idx_value >> 16) & 0x7FF);
break;
case RADEON_RB3D_COLORPITCH:
r = r100_cs_packet_next_reloc(p, &reloc);
@ -229,17 +228,17 @@ int r200_packet0_check(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK;
track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
break;
case RADEON_RB3D_DEPTHPITCH:
track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK;
track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
break;
case RADEON_RB3D_CNTL:
switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
case 7:
case 8:
case 9:
@ -257,18 +256,18 @@ int r200_packet0_check(struct radeon_cs_parser *p,
break;
default:
DRM_ERROR("Invalid color buffer format (%d) !\n",
((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
return -EINVAL;
}
if (ib_chunk->kdata[idx] & RADEON_DEPTHXY_OFFSET_ENABLE) {
if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
DRM_ERROR("No support for depth xy offset in kms\n");
return -EINVAL;
}
track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE);
track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
break;
case RADEON_RB3D_ZSTENCILCNTL:
switch (ib_chunk->kdata[idx] & 0xf) {
switch (idx_value & 0xf) {
case 0:
track->zb.cpp = 2;
break;
@ -292,27 +291,27 @@ int r200_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case RADEON_PP_CNTL:
{
uint32_t temp = ib_chunk->kdata[idx] >> 4;
uint32_t temp = idx_value >> 4;
for (i = 0; i < track->num_texture; i++)
track->textures[i].enabled = !!(temp & (1 << i));
}
break;
case RADEON_SE_VF_CNTL:
track->vap_vf_cntl = ib_chunk->kdata[idx];
track->vap_vf_cntl = idx_value;
break;
case 0x210c:
/* VAP_VF_MAX_VTX_INDX */
track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL;
track->max_indx = idx_value & 0x00FFFFFFUL;
break;
case R200_SE_VTX_FMT_0:
track->vtx_size = r200_get_vtx_size_0(ib_chunk->kdata[idx]);
track->vtx_size = r200_get_vtx_size_0(idx_value);
break;
case R200_SE_VTX_FMT_1:
track->vtx_size += r200_get_vtx_size_1(ib_chunk->kdata[idx]);
track->vtx_size += r200_get_vtx_size_1(idx_value);
break;
case R200_PP_TXSIZE_0:
case R200_PP_TXSIZE_1:
@ -321,8 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXSIZE_4:
case R200_PP_TXSIZE_5:
i = (reg - R200_PP_TXSIZE_0) / 32;
track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1;
track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
break;
case R200_PP_TXPITCH_0:
case R200_PP_TXPITCH_1:
@ -331,7 +330,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXPITCH_4:
case R200_PP_TXPITCH_5:
i = (reg - R200_PP_TXPITCH_0) / 32;
track->textures[i].pitch = ib_chunk->kdata[idx] + 32;
track->textures[i].pitch = idx_value + 32;
break;
case R200_PP_TXFILTER_0:
case R200_PP_TXFILTER_1:
@ -340,12 +339,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXFILTER_4:
case R200_PP_TXFILTER_5:
i = (reg - R200_PP_TXFILTER_0) / 32;
track->textures[i].num_levels = ((ib_chunk->kdata[idx] & R200_MAX_MIP_LEVEL_MASK)
track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK)
>> R200_MAX_MIP_LEVEL_SHIFT);
tmp = (ib_chunk->kdata[idx] >> 23) & 0x7;
tmp = (idx_value >> 23) & 0x7;
if (tmp == 2 || tmp == 6)
track->textures[i].roundup_w = false;
tmp = (ib_chunk->kdata[idx] >> 27) & 0x7;
tmp = (idx_value >> 27) & 0x7;
if (tmp == 2 || tmp == 6)
track->textures[i].roundup_h = false;
break;
@ -364,8 +363,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXFORMAT_X_4:
case R200_PP_TXFORMAT_X_5:
i = (reg - R200_PP_TXFORMAT_X_0) / 32;
track->textures[i].txdepth = ib_chunk->kdata[idx] & 0x7;
tmp = (ib_chunk->kdata[idx] >> 16) & 0x3;
track->textures[i].txdepth = idx_value & 0x7;
tmp = (idx_value >> 16) & 0x3;
/* 2D, 3D, CUBE */
switch (tmp) {
case 0:
@ -389,14 +388,14 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_TXFORMAT_4:
case R200_PP_TXFORMAT_5:
i = (reg - R200_PP_TXFORMAT_0) / 32;
if (ib_chunk->kdata[idx] & R200_TXFORMAT_NON_POWER2) {
if (idx_value & R200_TXFORMAT_NON_POWER2) {
track->textures[i].use_pitch = 1;
} else {
track->textures[i].use_pitch = 0;
track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
}
switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) {
switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
case R200_TXFORMAT_I8:
case R200_TXFORMAT_RGB332:
case R200_TXFORMAT_Y8:
@ -424,8 +423,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
track->textures[i].cpp = 4;
break;
}
track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf);
track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
break;
case R200_PP_CUBIC_FACES_0:
case R200_PP_CUBIC_FACES_1:
@ -433,7 +432,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
case R200_PP_CUBIC_FACES_3:
case R200_PP_CUBIC_FACES_4:
case R200_PP_CUBIC_FACES_5:
tmp = ib_chunk->kdata[idx];
tmp = idx_value;
i = (reg - R200_PP_CUBIC_FACES_0) / 32;
for (face = 0; face < 4; face++) {
track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);


@ -697,17 +697,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt,
unsigned idx, unsigned reg)
{
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
uint32_t tmp, tile_flags = 0;
unsigned i;
int r;
u32 idx_value;
ib = p->ib->ptr;
ib_chunk = &p->chunks[p->chunk_ib_idx];
track = (struct r100_cs_track *)p->track;
idx_value = radeon_get_ib_value(p, idx);
switch(reg) {
case AVIVO_D1MODE_VLINE_START_END:
case RADEON_CRTC_GUI_TRIG_VLINE:
@ -738,8 +739,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r;
}
track->cb[i].robj = reloc->robj;
track->cb[i].offset = ib_chunk->kdata[idx];
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
track->cb[i].offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_ZB_DEPTHOFFSET:
r = r100_cs_packet_next_reloc(p, &reloc);
@ -750,8 +751,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
return r;
}
track->zb.robj = reloc->robj;
track->zb.offset = ib_chunk->kdata[idx];
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
track->zb.offset = idx_value;
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case R300_TX_OFFSET_0:
case R300_TX_OFFSET_0+4:
@ -777,32 +778,32 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
track->textures[i].robj = reloc->robj;
break;
/* Tracked registers */
case 0x2084:
/* VAP_VF_CNTL */
track->vap_vf_cntl = ib_chunk->kdata[idx];
track->vap_vf_cntl = idx_value;
break;
case 0x20B4:
/* VAP_VTX_SIZE */
track->vtx_size = ib_chunk->kdata[idx] & 0x7F;
track->vtx_size = idx_value & 0x7F;
break;
case 0x2134:
/* VAP_VF_MAX_VTX_INDX */
track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL;
track->max_indx = idx_value & 0x00FFFFFFUL;
break;
case 0x43E4:
/* SC_SCISSOR1 */
track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
if (p->rdev->family < CHIP_RV515) {
track->maxy -= 1440;
}
break;
case 0x4E00:
/* RB3D_CCTL */
track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1;
track->num_cb = ((idx_value >> 5) & 0x3) + 1;
break;
case 0x4E38:
case 0x4E3C:
@ -825,13 +826,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_COLOR_MICROTILE_ENABLE;
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
i = (reg - 0x4E38) >> 2;
track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
track->cb[i].pitch = idx_value & 0x3FFE;
switch (((idx_value >> 21) & 0xF)) {
case 9:
case 11:
case 12:
@ -854,13 +855,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
default:
DRM_ERROR("Invalid color buffer format (%d) !\n",
((ib_chunk->kdata[idx] >> 21) & 0xF));
((idx_value >> 21) & 0xF));
return -EINVAL;
}
break;
case 0x4F00:
/* ZB_CNTL */
if (ib_chunk->kdata[idx] & 2) {
if (idx_value & 2) {
track->z_enabled = true;
} else {
track->z_enabled = false;
@ -868,7 +869,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
case 0x4F10:
/* ZB_FORMAT */
switch ((ib_chunk->kdata[idx] & 0xF)) {
switch ((idx_value & 0xF)) {
case 0:
case 1:
track->zb.cpp = 2;
@ -878,7 +879,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
default:
DRM_ERROR("Invalid z buffer format (%d) !\n",
(ib_chunk->kdata[idx] & 0xF));
(idx_value & 0xF));
return -EINVAL;
}
break;
@ -897,17 +898,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
tile_flags |= R300_DEPTHMICROTILE_TILED;;
tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
tmp = idx_value & ~(0x7 << 16);
tmp |= tile_flags;
ib[idx] = tmp;
track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
track->zb.pitch = idx_value & 0x3FFC;
break;
case 0x4104:
for (i = 0; i < 16; i++) {
bool enabled;
enabled = !!(ib_chunk->kdata[idx] & (1 << i));
enabled = !!(idx_value & (1 << i));
track->textures[i].enabled = enabled;
}
break;
@ -929,9 +930,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x44FC:
/* TX_FORMAT1_[0-15] */
i = (reg - 0x44C0) >> 2;
tmp = (ib_chunk->kdata[idx] >> 25) & 0x3;
tmp = (idx_value >> 25) & 0x3;
track->textures[i].tex_coord_type = tmp;
switch ((ib_chunk->kdata[idx] & 0x1F)) {
switch ((idx_value & 0x1F)) {
case R300_TX_FORMAT_X8:
case R300_TX_FORMAT_Y4X4:
case R300_TX_FORMAT_Z3Y3X2:
@ -971,7 +972,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
break;
default:
DRM_ERROR("Invalid texture format %u\n",
(ib_chunk->kdata[idx] & 0x1F));
(idx_value & 0x1F));
return -EINVAL;
break;
}
@ -994,11 +995,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x443C:
/* TX_FILTER0_[0-15] */
i = (reg - 0x4400) >> 2;
tmp = ib_chunk->kdata[idx] & 0x7;
tmp = idx_value & 0x7;
if (tmp == 2 || tmp == 4 || tmp == 6) {
track->textures[i].roundup_w = false;
}
tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;
tmp = (idx_value >> 3) & 0x7;
if (tmp == 2 || tmp == 4 || tmp == 6) {
track->textures[i].roundup_h = false;
}
@ -1021,12 +1022,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x453C:
/* TX_FORMAT2_[0-15] */
i = (reg - 0x4500) >> 2;
tmp = ib_chunk->kdata[idx] & 0x3FFF;
tmp = idx_value & 0x3FFF;
track->textures[i].pitch = tmp + 1;
if (p->rdev->family >= CHIP_RV515) {
tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11;
tmp = ((idx_value >> 15) & 1) << 11;
track->textures[i].width_11 = tmp;
tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11;
tmp = ((idx_value >> 16) & 1) << 11;
track->textures[i].height_11 = tmp;
}
break;
@ -1048,15 +1049,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
case 0x44BC:
/* TX_FORMAT0_[0-15] */
i = (reg - 0x4480) >> 2;
tmp = ib_chunk->kdata[idx] & 0x7FF;
tmp = idx_value & 0x7FF;
track->textures[i].width = tmp + 1;
tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF;
tmp = (idx_value >> 11) & 0x7FF;
track->textures[i].height = tmp + 1;
tmp = (ib_chunk->kdata[idx] >> 26) & 0xF;
tmp = (idx_value >> 26) & 0xF;
track->textures[i].num_levels = tmp;
tmp = ib_chunk->kdata[idx] & (1 << 31);
tmp = idx_value & (1 << 31);
track->textures[i].use_pitch = !!tmp;
tmp = (ib_chunk->kdata[idx] >> 22) & 0xF;
tmp = (idx_value >> 22) & 0xF;
track->textures[i].txdepth = tmp;
break;
case R300_ZB_ZPASS_ADDR:
@ -1067,7 +1068,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
break;
case 0x4be8:
/* valid register only on RV530 */
@ -1085,60 +1086,20 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
static int r300_packet3_check(struct radeon_cs_parser *p,
struct radeon_cs_packet *pkt)
{
struct radeon_cs_chunk *ib_chunk;
struct radeon_cs_reloc *reloc;
struct r100_cs_track *track;
volatile uint32_t *ib;
unsigned idx;
unsigned i, c;
int r;
ib = p->ib->ptr;
ib_chunk = &p->chunks[p->chunk_ib_idx];
idx = pkt->idx + 1;
track = (struct r100_cs_track *)p->track;
switch(pkt->opcode) {
case PACKET3_3D_LOAD_VBPNTR:
c = ib_chunk->kdata[idx++] & 0x1F;
track->num_arrays = c;
for (i = 0; i < (c - 1); i+=2, idx+=3) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
track->arrays[i + 0].esize &= 0x7F;
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 1].robj = reloc->robj;
track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
track->arrays[i + 1].esize &= 0x7F;
}
if (c & 1) {
r = r100_cs_packet_next_reloc(p, &reloc);
if (r) {
DRM_ERROR("No reloc for packet3 %d\n",
pkt->opcode);
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
track->arrays[i + 0].robj = reloc->robj;
track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
track->arrays[i + 0].esize &= 0x7F;
}
r = r100_packet3_load_vbpntr(p, pkt, idx);
if (r)
return r;
break;
case PACKET3_INDX_BUFFER:
r = r100_cs_packet_next_reloc(p, &reloc);
@ -1147,7 +1108,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
r100_cs_dump_packet(p, pkt);
return r;
}
ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
if (r) {
return r;
@ -1158,11 +1119,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
/* Number of dwords is vtx_size * (num_vertices - 1)
* PRIM_WALK must be equal to 3 vertex data in embedded
* in cmd stream */
if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vap_vf_cntl = ib_chunk->kdata[idx+1];
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
track->immd_dwords = pkt->count - 1;
r = r100_cs_track_check(p->rdev, track);
if (r) {
@ -1173,11 +1134,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
/* Number of dwords is vtx_size * (num_vertices - 1)
* PRIM_WALK must be equal to 3 vertex data in embedded
* in cmd stream */
if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
return -EINVAL;
}
track->vap_vf_cntl = ib_chunk->kdata[idx];
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
track->immd_dwords = pkt->count;
r = r100_cs_track_check(p->rdev, track);
if (r) {
@ -1185,28 +1146,28 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
}
break;
case PACKET3_3D_DRAW_VBUF:
track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
r = r100_cs_track_check(p->rdev, track);
if (r) {
return r;
}
break;
case PACKET3_3D_DRAW_VBUF_2:
track->vap_vf_cntl = ib_chunk->kdata[idx];
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
r = r100_cs_track_check(p->rdev, track);
if (r) {
return r;
}
break;
case PACKET3_3D_DRAW_INDX:
track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
r = r100_cs_track_check(p->rdev, track);
if (r) {
return r;
}
break;
case PACKET3_3D_DRAW_INDX_2:
track->vap_vf_cntl = ib_chunk->kdata[idx];
track->vap_vf_cntl = radeon_get_ib_value(p, idx);
r = r100_cs_track_check(p->rdev, track);
if (r) {
return r;

Some files were not shown because too many files changed in this diff.