Merge 3.0-rc2 + Linus's latest into usb-linus
This is needed to get the following MAINTAINERS patch to apply properly.

Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
commit 7e24cf43f7
 CREDITS | 8
@@ -518,6 +518,14 @@ N: Zach Brown
 E: zab@zabbo.net
 D: maestro pci sound
 
+M: David Brownell
+D: Kernel engineer, mentor, and friend. Maintained USB EHCI and
+D: gadget layers, SPI subsystem, GPIO subsystem, and more than a few
+D: device drivers. His encouragement also helped many engineers get
+D: started working on the Linux kernel. David passed away in early
+D: 2011, and will be greatly missed.
+W: https://lkml.org/lkml/2011/4/5/36
+
 N: Gary Brubaker
 E: xavyer@ix.netcom.com
 D: USB Serial Empeg Empeg-car Mark I/II Driver

 MAINTAINERS | 14
@@ -4252,8 +4252,7 @@ F: drivers/mmc/
 F: include/linux/mmc/
 
 MULTIMEDIA CARD (MMC) ETC. OVER SPI
-M: David Brownell <dbrownell@users.sourceforge.net>
-S: Odd Fixes
+S: Orphan
 F: drivers/mmc/host/mmc_spi.c
 F: include/linux/spi/mmc_spi.h
 
@@ -4603,7 +4602,6 @@ F: drivers/media/video/omap3isp/*
 
 OMAP USB SUPPORT
 M: Felipe Balbi <balbi@ti.com>
-M: David Brownell <dbrownell@users.sourceforge.net>
 L: linux-usb@vger.kernel.org
 L: linux-omap@vger.kernel.org
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
@@ -5984,7 +5982,6 @@ F: Documentation/serial/specialix.txt
 F: drivers/staging/tty/specialix*
 
 SPI SUBSYSTEM
-M: David Brownell <dbrownell@users.sourceforge.net>
 M: Grant Likely <grant.likely@secretlab.ca>
 L: spi-devel-general@lists.sourceforge.net
 Q: http://patchwork.kernel.org/project/spi-devel-general/list/
@@ -6432,9 +6429,8 @@ S: Maintained
 F: drivers/usb/misc/rio500*
 
 USB EHCI DRIVER
-M: David Brownell <dbrownell@users.sourceforge.net>
 L: linux-usb@vger.kernel.org
-S: Odd Fixes
+S: Orphan
 F: Documentation/usb/ehci.txt
 F: drivers/usb/host/ehci*
 
@@ -6448,10 +6444,9 @@ S: Maintained
 F: drivers/media/video/et61x251/
 
 USB GADGET/PERIPHERAL SUBSYSTEM
-M: David Brownell <dbrownell@users.sourceforge.net>
 L: linux-usb@vger.kernel.org
 W: http://www.linux-usb.org/gadget
-S: Maintained
+S: Orphan
 F: drivers/usb/gadget/
 F: include/linux/usb/gadget*
 
@@ -6492,9 +6487,8 @@ S: Maintained
 F: sound/usb/midi.*
 
 USB OHCI DRIVER
-M: David Brownell <dbrownell@users.sourceforge.net>
 L: linux-usb@vger.kernel.org
-S: Odd Fixes
+S: Orphan
 F: Documentation/usb/ohci.txt
 F: drivers/usb/host/ohci*
 
@ -284,14 +284,15 @@ static int __init omap1_system_dma_init(void)
|
|||
dma_base = ioremap(res[0].start, resource_size(&res[0]));
|
||||
if (!dma_base) {
|
||||
pr_err("%s: Unable to ioremap\n", __func__);
|
||||
return -ENODEV;
|
||||
ret = -ENODEV;
|
||||
goto exit_device_put;
|
||||
}
|
||||
|
||||
ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "%s: Unable to add resources for %s%d\n",
|
||||
__func__, pdev->name, pdev->id);
|
||||
goto exit_device_del;
|
||||
goto exit_device_put;
|
||||
}
|
||||
|
||||
p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
|
||||
|
@ -299,7 +300,7 @@ static int __init omap1_system_dma_init(void)
|
|||
dev_err(&pdev->dev, "%s: Unable to allocate 'p' for %s\n",
|
||||
__func__, pdev->name);
|
||||
ret = -ENOMEM;
|
||||
goto exit_device_put;
|
||||
goto exit_device_del;
|
||||
}
|
||||
|
||||
d = kzalloc(sizeof(struct omap_dma_dev_attr), GFP_KERNEL);
|
||||
|
@ -380,10 +381,10 @@ exit_release_d:
|
|||
kfree(d);
|
||||
exit_release_p:
|
||||
kfree(p);
|
||||
exit_device_put:
|
||||
platform_device_put(pdev);
|
||||
exit_device_del:
|
||||
platform_device_del(pdev);
|
||||
exit_device_put:
|
||||
platform_device_put(pdev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -26,13 +26,13 @@
|
|||
#include <linux/err.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/gpio.h>
|
||||
|
||||
#include <mach/hardware.h>
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/mach/arch.h>
|
||||
#include <asm/mach/map.h>
|
||||
|
||||
#include <mach/gpio.h>
|
||||
#include <plat/board.h>
|
||||
#include <plat/common.h>
|
||||
#include <plat/gpmc.h>
|
||||
|
|
|
@ -622,19 +622,19 @@ static struct omap_device_pad serial3_pads[] __initdata = {
|
|||
OMAP_MUX_MODE0),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial1_data = {
|
||||
static struct omap_board_data serial1_data __initdata = {
|
||||
.id = 0,
|
||||
.pads = serial1_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial1_pads),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial2_data = {
|
||||
static struct omap_board_data serial2_data __initdata = {
|
||||
.id = 1,
|
||||
.pads = serial2_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial2_pads),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial3_data = {
|
||||
static struct omap_board_data serial3_data __initdata = {
|
||||
.id = 2,
|
||||
.pads = serial3_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial3_pads),
|
||||
|
|
|
@ -258,7 +258,7 @@ static struct gpio sdp4430_eth_gpios[] __initdata = {
|
|||
{ ETH_KS8851_IRQ, GPIOF_IN, "eth_irq" },
|
||||
};
|
||||
|
||||
static int omap_ethernet_init(void)
|
||||
static int __init omap_ethernet_init(void)
|
||||
{
|
||||
int status;
|
||||
|
||||
|
@ -322,6 +322,7 @@ static struct omap2_hsmmc_info mmc[] = {
|
|||
.gpio_wp = -EINVAL,
|
||||
.nonremovable = true,
|
||||
.ocr_mask = MMC_VDD_29_30,
|
||||
.no_off_init = true,
|
||||
},
|
||||
{
|
||||
.mmc = 1,
|
||||
|
@ -681,19 +682,19 @@ static struct omap_device_pad serial4_pads[] __initdata = {
|
|||
OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial2_data = {
|
||||
static struct omap_board_data serial2_data __initdata = {
|
||||
.id = 1,
|
||||
.pads = serial2_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial2_pads),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial3_data = {
|
||||
static struct omap_board_data serial3_data __initdata = {
|
||||
.id = 2,
|
||||
.pads = serial3_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial3_pads),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial4_data = {
|
||||
static struct omap_board_data serial4_data __initdata = {
|
||||
.id = 3,
|
||||
.pads = serial4_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial4_pads),
|
||||
|
@ -729,7 +730,7 @@ static void __init omap_4430sdp_init(void)
|
|||
|
||||
if (omap_rev() == OMAP4430_REV_ES1_0)
|
||||
package = OMAP_PACKAGE_CBL;
|
||||
omap4_mux_init(board_mux, package);
|
||||
omap4_mux_init(board_mux, NULL, package);
|
||||
|
||||
omap_board_config = sdp4430_config;
|
||||
omap_board_config_size = ARRAY_SIZE(sdp4430_config);
|
||||
|
|
|
@ -27,13 +27,13 @@
|
|||
#include <linux/err.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/smc91x.h>
|
||||
#include <linux/gpio.h>
|
||||
|
||||
#include <mach/hardware.h>
|
||||
#include <asm/mach-types.h>
|
||||
#include <asm/mach/arch.h>
|
||||
#include <asm/mach/flash.h>
|
||||
|
||||
#include <mach/gpio.h>
|
||||
#include <plat/led.h>
|
||||
#include <plat/usb.h>
|
||||
#include <plat/board.h>
|
||||
|
|
|
@ -63,8 +63,6 @@
|
|||
#define SB_T35_SMSC911X_CS 4
|
||||
#define SB_T35_SMSC911X_GPIO 65
|
||||
|
||||
#define NAND_BLOCK_SIZE SZ_128K
|
||||
|
||||
#if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE)
|
||||
#include <linux/smsc911x.h>
|
||||
#include <plat/gpmc-smsc911x.h>
|
||||
|
|
|
@ -48,6 +48,7 @@
|
|||
|
||||
#include "mux.h"
|
||||
#include "control.h"
|
||||
#include "common-board-devices.h"
|
||||
|
||||
#if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE)
|
||||
static struct gpio_led cm_t3517_leds[] = {
|
||||
|
@ -177,7 +178,7 @@ static struct usbhs_omap_board_data cm_t3517_ehci_pdata __initdata = {
|
|||
.reset_gpio_port[2] = -EINVAL,
|
||||
};
|
||||
|
||||
static int cm_t3517_init_usbh(void)
|
||||
static int __init cm_t3517_init_usbh(void)
|
||||
{
|
||||
int err;
|
||||
|
||||
|
@ -203,8 +204,6 @@ static inline int cm_t3517_init_usbh(void)
|
|||
#endif
|
||||
|
||||
#if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE)
|
||||
#define NAND_BLOCK_SIZE SZ_128K
|
||||
|
||||
static struct mtd_partition cm_t3517_nand_partitions[] = {
|
||||
{
|
||||
.name = "xloader",
|
||||
|
|
|
@ -61,8 +61,6 @@
|
|||
#include "timer-gp.h"
|
||||
#include "common-board-devices.h"
|
||||
|
||||
#define NAND_BLOCK_SIZE SZ_128K
|
||||
|
||||
#define OMAP_DM9000_GPIO_IRQ 25
|
||||
#define OMAP3_DEVKIT_TS_GPIO 27
|
||||
|
||||
|
|
|
@ -54,8 +54,6 @@
|
|||
#include "pm.h"
|
||||
#include "common-board-devices.h"
|
||||
|
||||
#define NAND_BLOCK_SIZE SZ_128K
|
||||
|
||||
/*
|
||||
* OMAP3 Beagle revision
|
||||
* Run time detection of Beagle revision is done by reading GPIO.
|
||||
|
@ -106,6 +104,9 @@ static void __init omap3_beagle_init_rev(void)
|
|||
beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1)
|
||||
| (gpio_get_value(173) << 2);
|
||||
|
||||
gpio_free_array(omap3_beagle_rev_gpios,
|
||||
ARRAY_SIZE(omap3_beagle_rev_gpios));
|
||||
|
||||
switch (beagle_rev) {
|
||||
case 7:
|
||||
printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n");
|
||||
|
@ -579,6 +580,9 @@ static void __init omap3_beagle_init(void)
|
|||
omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
|
||||
ARRAY_SIZE(omap3beagle_nand_partitions));
|
||||
|
||||
/* Ensure msecure is mux'd to be able to set the RTC. */
|
||||
omap_mux_init_signal("sys_drm_msecure", OMAP_PIN_OFF_OUTPUT_HIGH);
|
||||
|
||||
/* Ensure SDRC pins are mux'd for self-refresh */
|
||||
omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT);
|
||||
omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT);
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include <linux/leds.h>
|
||||
#include <linux/input.h>
|
||||
#include <linux/input/matrix_keypad.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/gpio_keys.h>
|
||||
#include <linux/mmc/host.h>
|
||||
#include <linux/mmc/card.h>
|
||||
|
@ -41,7 +42,6 @@
|
|||
|
||||
#include <plat/board.h>
|
||||
#include <plat/common.h>
|
||||
#include <mach/gpio.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <plat/mcspi.h>
|
||||
#include <plat/usb.h>
|
||||
|
@ -57,8 +57,6 @@
|
|||
#define PANDORA_WIFI_NRESET_GPIO 23
|
||||
#define OMAP3_PANDORA_TS_GPIO 94
|
||||
|
||||
#define NAND_BLOCK_SIZE SZ_128K
|
||||
|
||||
static struct mtd_partition omap3pandora_nand_partitions[] = {
|
||||
{
|
||||
.name = "xloader",
|
||||
|
|
|
@ -56,8 +56,6 @@
|
|||
|
||||
#include <asm/setup.h>
|
||||
|
||||
#define NAND_BLOCK_SIZE SZ_128K
|
||||
|
||||
#define OMAP3_AC_GPIO 136
|
||||
#define OMAP3_TS_GPIO 162
|
||||
#define TB_BL_PWM_TIMER 9
|
||||
|
|
|
@ -526,19 +526,19 @@ static struct omap_device_pad serial4_pads[] __initdata = {
|
|||
OMAP_PIN_OUTPUT | OMAP_MUX_MODE0),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial2_data = {
|
||||
static struct omap_board_data serial2_data __initdata = {
|
||||
.id = 1,
|
||||
.pads = serial2_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial2_pads),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial3_data = {
|
||||
static struct omap_board_data serial3_data __initdata = {
|
||||
.id = 2,
|
||||
.pads = serial3_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial3_pads),
|
||||
};
|
||||
|
||||
static struct omap_board_data serial4_data = {
|
||||
static struct omap_board_data serial4_data __initdata = {
|
||||
.id = 3,
|
||||
.pads = serial4_pads,
|
||||
.pads_cnt = ARRAY_SIZE(serial4_pads),
|
||||
|
@ -687,7 +687,7 @@ static void __init omap4_panda_init(void)
|
|||
|
||||
if (omap_rev() == OMAP4430_REV_ES1_0)
|
||||
package = OMAP_PACKAGE_CBL;
|
||||
omap4_mux_init(board_mux, package);
|
||||
omap4_mux_init(board_mux, NULL, package);
|
||||
|
||||
if (wl12xx_set_platform_data(&omap_panda_wlan_data))
|
||||
pr_err("error setting wl12xx data\n");
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/i2c/twl.h>
|
||||
|
@ -45,7 +46,6 @@
|
|||
#include <plat/common.h>
|
||||
#include <video/omapdss.h>
|
||||
#include <video/omap-panel-generic-dpi.h>
|
||||
#include <mach/gpio.h>
|
||||
#include <plat/gpmc.h>
|
||||
#include <mach/hardware.h>
|
||||
#include <plat/nand.h>
|
||||
|
@ -65,8 +65,6 @@
|
|||
#define OVERO_GPIO_USBH_CPEN 168
|
||||
#define OVERO_GPIO_USBH_NRESET 183
|
||||
|
||||
#define NAND_BLOCK_SIZE SZ_128K
|
||||
|
||||
#define OVERO_SMSC911X_CS 5
|
||||
#define OVERO_SMSC911X_GPIO 176
|
||||
#define OVERO_SMSC911X2_CS 4
|
||||
|
|
|
@ -488,6 +488,7 @@ static struct regulator_init_data rx51_vmmc2 = {
|
|||
.name = "V28_A",
|
||||
.min_uV = 2800000,
|
||||
.max_uV = 3000000,
|
||||
.always_on = true, /* due VIO leak to AIC34 VDDs */
|
||||
.apply_uV = true,
|
||||
.valid_modes_mask = REGULATOR_MODE_NORMAL
|
||||
| REGULATOR_MODE_STANDBY,
|
||||
|
@ -582,7 +583,7 @@ static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n)
|
|||
{
|
||||
/* FIXME this gpio setup is just a placeholder for now */
|
||||
gpio_request_one(gpio + 6, GPIOF_OUT_INIT_LOW, "backlight_pwm");
|
||||
gpio_request_one(gpio + 7, GPIOF_OUT_INIT_HIGH, "speaker_en");
|
||||
gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "speaker_en");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -26,7 +26,7 @@ static struct gpio zoom_lcd_gpios[] __initdata = {
|
|||
{ LCD_PANEL_QVGA_GPIO, GPIOF_OUT_INIT_HIGH, "lcd qvga" },
|
||||
};
|
||||
|
||||
static void zoom_lcd_panel_init(void)
|
||||
static void __init zoom_lcd_panel_init(void)
|
||||
{
|
||||
zoom_lcd_gpios[0].gpio = (omap_rev() > OMAP3430_REV_ES3_0) ?
|
||||
LCD_PANEL_RESET_GPIO_PROD :
|
||||
|
|
|
@ -85,18 +85,18 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
|
|||
struct spi_board_info *spi_bi = &ads7846_spi_board_info;
|
||||
int err;
|
||||
|
||||
err = gpio_request(gpio_pendown, "TS PenDown");
|
||||
if (err) {
|
||||
pr_err("Could not obtain gpio for TS PenDown: %d\n", err);
|
||||
return;
|
||||
if (board_pdata && board_pdata->get_pendown_state) {
|
||||
err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
|
||||
if (err) {
|
||||
pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
|
||||
return;
|
||||
}
|
||||
gpio_export(gpio_pendown, 0);
|
||||
|
||||
if (gpio_debounce)
|
||||
gpio_set_debounce(gpio_pendown, gpio_debounce);
|
||||
}
|
||||
|
||||
gpio_direction_input(gpio_pendown);
|
||||
gpio_export(gpio_pendown, 0);
|
||||
|
||||
if (gpio_debounce)
|
||||
gpio_set_debounce(gpio_pendown, gpio_debounce);
|
||||
|
||||
ads7846_config.gpio_pendown = gpio_pendown;
|
||||
|
||||
spi_bi->bus_num = bus_num;
|
||||
|
|
|
@ -1,6 +1,8 @@
|
|||
#ifndef __OMAP_COMMON_BOARD_DEVICES__
|
||||
#define __OMAP_COMMON_BOARD_DEVICES__
|
||||
|
||||
#define NAND_BLOCK_SIZE SZ_128K
|
||||
|
||||
struct twl4030_platform_data;
|
||||
struct mtd_partition;
|
||||
|
||||
|
|
|
@ -97,7 +97,7 @@ static int __init omap4_l3_init(void)
|
|||
|
||||
WARN(IS_ERR(od), "could not build omap_device for %s\n", oh_name);
|
||||
|
||||
return PTR_ERR(od);
|
||||
return IS_ERR(od) ? PTR_ERR(od) : 0;
|
||||
}
|
||||
postcore_initcall(omap4_l3_init);
|
||||
|
||||
|
|
|
@ -145,6 +145,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
|
|||
int power_on, int vdd)
|
||||
{
|
||||
u32 reg;
|
||||
unsigned long timeout;
|
||||
|
||||
if (power_on) {
|
||||
reg = omap4_ctrl_pad_readl(control_pbias_offset);
|
||||
|
@ -157,9 +158,15 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
|
|||
OMAP4_MMC1_PWRDNZ_MASK |
|
||||
OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
|
||||
omap4_ctrl_pad_writel(reg, control_pbias_offset);
|
||||
/* 4 microsec delay for comparator to generate an error*/
|
||||
udelay(4);
|
||||
reg = omap4_ctrl_pad_readl(control_pbias_offset);
|
||||
|
||||
timeout = jiffies + msecs_to_jiffies(5);
|
||||
do {
|
||||
reg = omap4_ctrl_pad_readl(control_pbias_offset);
|
||||
if (!(reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK))
|
||||
break;
|
||||
usleep_range(100, 200);
|
||||
} while (!time_after(jiffies, timeout));
|
||||
|
||||
if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) {
|
||||
pr_err("Pbias Voltage is not same as LDO\n");
|
||||
/* Caution : On VMODE_ERROR Power Down MMC IO */
|
||||
|
@ -331,6 +338,9 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
|
|||
if (c->no_off)
|
||||
mmc->slots[0].no_off = 1;
|
||||
|
||||
if (c->no_off_init)
|
||||
mmc->slots[0].no_regulator_off_init = c->no_off_init;
|
||||
|
||||
if (c->vcc_aux_disable_is_sleep)
|
||||
mmc->slots[0].vcc_aux_disable_is_sleep = 1;
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@ struct omap2_hsmmc_info {
|
|||
bool nonremovable; /* Nonremovable e.g. eMMC */
|
||||
bool power_saving; /* Try to sleep or power off when possible */
|
||||
bool no_off; /* power_saving and power is not to go off */
|
||||
bool no_off_init; /* no power off when not in MMC sleep state */
|
||||
bool vcc_aux_disable_is_sleep; /* Regulator off remapped to sleep */
|
||||
int gpio_cd; /* or -EINVAL */
|
||||
int gpio_wp; /* or -EINVAL */
|
||||
|
|
|
@ -83,6 +83,9 @@ void omap_mux_write(struct omap_mux_partition *partition, u16 val,
|
|||
void omap_mux_write_array(struct omap_mux_partition *partition,
|
||||
struct omap_board_mux *board_mux)
|
||||
{
|
||||
if (!board_mux)
|
||||
return;
|
||||
|
||||
while (board_mux->reg_offset != OMAP_MUX_TERMINATOR) {
|
||||
omap_mux_write(partition, board_mux->value,
|
||||
board_mux->reg_offset);
|
||||
|
@ -906,7 +909,7 @@ static struct omap_mux *omap_mux_get_by_gpio(
|
|||
u16 omap_mux_get_gpio(int gpio)
|
||||
{
|
||||
struct omap_mux_partition *partition;
|
||||
struct omap_mux *m;
|
||||
struct omap_mux *m = NULL;
|
||||
|
||||
list_for_each_entry(partition, &mux_partitions, node) {
|
||||
m = omap_mux_get_by_gpio(partition, gpio);
|
||||
|
|
|
@ -323,10 +323,12 @@ int omap3_mux_init(struct omap_board_mux *board_mux, int flags);
|
|||
|
||||
/**
|
||||
* omap4_mux_init() - initialize mux system with board specific set
|
||||
* @board_mux: Board specific mux table
|
||||
* @board_subset: Board specific mux table
|
||||
* @board_wkup_subset: Board specific mux table for wakeup instance
|
||||
* @flags: OMAP package type used for the board
|
||||
*/
|
||||
int omap4_mux_init(struct omap_board_mux *board_mux, int flags);
|
||||
int omap4_mux_init(struct omap_board_mux *board_subset,
|
||||
struct omap_board_mux *board_wkup_subset, int flags);
|
||||
|
||||
/**
|
||||
* omap_mux_init - private mux init function, do not call
|
||||
|
|
|
@ -1309,7 +1309,8 @@ static struct omap_ball __initdata omap4_wkup_cbl_cbs_ball[] = {
|
|||
#define omap4_wkup_cbl_cbs_ball NULL
|
||||
#endif
|
||||
|
||||
int __init omap4_mux_init(struct omap_board_mux *board_subset, int flags)
|
||||
int __init omap4_mux_init(struct omap_board_mux *board_subset,
|
||||
struct omap_board_mux *board_wkup_subset, int flags)
|
||||
{
|
||||
struct omap_ball *package_balls_core;
|
||||
struct omap_ball *package_balls_wkup = omap4_wkup_cbl_cbs_ball;
|
||||
|
@ -1347,7 +1348,7 @@ int __init omap4_mux_init(struct omap_board_mux *board_subset, int flags)
|
|||
OMAP_MUX_GPIO_IN_MODE3,
|
||||
OMAP4_CTRL_MODULE_PAD_WKUP_MUX_PBASE,
|
||||
OMAP4_CTRL_MODULE_PAD_WKUP_MUX_SIZE,
|
||||
omap4_wkup_muxmodes, NULL, board_subset,
|
||||
omap4_wkup_muxmodes, NULL, board_wkup_subset,
|
||||
package_balls_wkup);
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -1628,7 +1628,7 @@ int omap_hwmod_for_each(int (*fn)(struct omap_hwmod *oh, void *data),
|
|||
void *data)
|
||||
{
|
||||
struct omap_hwmod *temp_oh;
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
if (!fn)
|
||||
return -EINVAL;
|
||||
|
|
|
@ -5109,7 +5109,7 @@ static __initdata struct omap_hwmod *omap44xx_hwmods[] = {
|
|||
&omap44xx_iva_seq1_hwmod,
|
||||
|
||||
/* kbd class */
|
||||
/* &omap44xx_kbd_hwmod, */
|
||||
&omap44xx_kbd_hwmod,
|
||||
|
||||
/* mailbox class */
|
||||
&omap44xx_mailbox_hwmod,
|
||||
|
|
|
@ -56,8 +56,10 @@ int omap4430_phy_init(struct device *dev)
|
|||
/* Power down the phy */
|
||||
__raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF);
|
||||
|
||||
if (!dev)
|
||||
if (!dev) {
|
||||
iounmap(ctrl_base);
|
||||
return 0;
|
||||
}
|
||||
|
||||
phyclk = clk_get(dev, "ocp2scp_usb_phy_ick");
|
||||
if (IS_ERR(phyclk)) {
|
||||
|
|
|
@ -11,6 +11,7 @@
|
|||
|
||||
#include <linux/mtd/map.h>
|
||||
|
||||
struct platform_device;
|
||||
extern void omap1_set_vpp(struct platform_device *pdev, int enable);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -29,9 +29,6 @@ struct iovm_struct {
|
|||
* lower 16 bit is used for h/w and upper 16 bit is for s/w.
|
||||
*/
|
||||
#define IOVMF_SW_SHIFT 16
|
||||
#define IOVMF_HW_SIZE (1 << IOVMF_SW_SHIFT)
|
||||
#define IOVMF_HW_MASK (IOVMF_HW_SIZE - 1)
|
||||
#define IOVMF_SW_MASK (~IOVMF_HW_MASK)UL
|
||||
|
||||
/*
|
||||
* iovma: h/w flags derived from cam and ram attribute
|
||||
|
|
|
@ -101,6 +101,9 @@ struct omap_mmc_platform_data {
|
|||
/* If using power_saving and the MMC power is not to go off */
|
||||
unsigned no_off:1;
|
||||
|
||||
/* eMMC does not handle power off when not in sleep state */
|
||||
unsigned no_regulator_off_init:1;
|
||||
|
||||
/* Regulator off remapped to sleep */
|
||||
unsigned vcc_aux_disable_is_sleep:1;
|
||||
|
||||
|
|
|
@ -648,7 +648,6 @@ u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
|
|||
return PTR_ERR(va);
|
||||
}
|
||||
|
||||
flags &= IOVMF_HW_MASK;
|
||||
flags |= IOVMF_DISCONT;
|
||||
flags |= IOVMF_MMIO;
|
||||
|
||||
|
@ -706,7 +705,6 @@ u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
|
|||
if (!va)
|
||||
return -ENOMEM;
|
||||
|
||||
flags &= IOVMF_HW_MASK;
|
||||
flags |= IOVMF_DISCONT;
|
||||
flags |= IOVMF_ALLOC;
|
||||
|
||||
|
@ -795,7 +793,6 @@ u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
|
|||
if (!va)
|
||||
return -ENOMEM;
|
||||
|
||||
flags &= IOVMF_HW_MASK;
|
||||
flags |= IOVMF_LINEAR;
|
||||
flags |= IOVMF_MMIO;
|
||||
|
||||
|
@ -853,7 +850,6 @@ u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
|
|||
return -ENOMEM;
|
||||
pa = virt_to_phys(va);
|
||||
|
||||
flags &= IOVMF_HW_MASK;
|
||||
flags |= IOVMF_LINEAR;
|
||||
flags |= IOVMF_ALLOC;
|
||||
|
||||
|
|
|
@ -166,7 +166,7 @@ static void __init omap_detect_sram(void)
|
|||
else if (cpu_is_omap1611())
|
||||
omap_sram_size = SZ_256K;
|
||||
else {
|
||||
printk(KERN_ERR "Could not detect SRAM size\n");
|
||||
pr_err("Could not detect SRAM size\n");
|
||||
omap_sram_size = 0x4000;
|
||||
}
|
||||
}
|
||||
|
@ -221,10 +221,10 @@ static void __init omap_map_sram(void)
|
|||
omap_sram_io_desc[0].length = ROUND_DOWN(omap_sram_size, PAGE_SIZE);
|
||||
iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
|
||||
|
||||
printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
|
||||
__pfn_to_phys(omap_sram_io_desc[0].pfn),
|
||||
omap_sram_io_desc[0].virtual,
|
||||
omap_sram_io_desc[0].length);
|
||||
pr_info("SRAM: Mapped pa 0x%08llx to va 0x%08lx size: 0x%lx\n",
|
||||
(long long) __pfn_to_phys(omap_sram_io_desc[0].pfn),
|
||||
omap_sram_io_desc[0].virtual,
|
||||
omap_sram_io_desc[0].length);
|
||||
|
||||
/*
|
||||
* Normally devicemaps_init() would flush caches and tlb after
|
||||
|
@ -252,7 +252,7 @@ static void __init omap_map_sram(void)
|
|||
void *omap_sram_push_address(unsigned long size)
|
||||
{
|
||||
if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) {
|
||||
printk(KERN_ERR "Not enough space in SRAM\n");
|
||||
pr_err("Not enough space in SRAM\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@
|
|||
#include <linux/irq.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/smp.h>
|
||||
|
@ -156,7 +156,7 @@ int die_if_no_fixup(const char *str, struct pt_regs *regs,
|
|||
|
||||
case EXCEP_TRAP:
|
||||
case EXCEP_UNIMPINS:
|
||||
if (get_user(opcode, (uint8_t __user *)regs->pc) != 0)
|
||||
if (probe_kernel_read(&opcode, (u8 *)regs->pc, 1) < 0)
|
||||
break;
|
||||
if (opcode == 0xff) {
|
||||
if (notify_die(DIE_BREAKPOINT, str, regs, code, 0, 0))
|
||||
|
|
|
@ -44,6 +44,7 @@ SECTIONS
|
|||
RO_DATA(PAGE_SIZE)
|
||||
|
||||
/* writeable */
|
||||
_sdata = .; /* Start of rw data section */
|
||||
RW_DATA_SECTION(32, PAGE_SIZE, THREAD_SIZE)
|
||||
_edata = .;
|
||||
|
||||
|
|
|
@ -120,14 +120,14 @@ debugger_local_cache_flushinv_one:
|
|||
# conditionally purge this line in all ways
|
||||
mov d1,(L1_CACHE_WAYDISP*0,a0)
|
||||
|
||||
debugger_local_cache_flushinv_no_dcache:
|
||||
debugger_local_cache_flushinv_one_no_dcache:
|
||||
#
|
||||
# now try to flush the icache
|
||||
#
|
||||
mov CHCTR,a0
|
||||
movhu (a0),d0
|
||||
btst CHCTR_ICEN,d0
|
||||
beq mn10300_local_icache_inv_range_reg_end
|
||||
beq debugger_local_cache_flushinv_one_end
|
||||
|
||||
LOCAL_CLI_SAVE(d1)
|
||||
|
||||
|
|
|
@ -89,6 +89,7 @@ config S390
|
|||
select HAVE_GET_USER_PAGES_FAST
|
||||
select HAVE_ARCH_MUTEX_CPU_RELAX
|
||||
select HAVE_ARCH_JUMP_LABEL if !MARCH_G5
|
||||
select HAVE_RCU_TABLE_FREE if SMP
|
||||
select ARCH_INLINE_SPIN_TRYLOCK
|
||||
select ARCH_INLINE_SPIN_TRYLOCK_BH
|
||||
select ARCH_INLINE_SPIN_LOCK
|
||||
|
|
|
@ -17,15 +17,15 @@
|
|||
#include <linux/gfp.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#define check_pgt_cache() do {} while (0)
|
||||
|
||||
unsigned long *crst_table_alloc(struct mm_struct *);
|
||||
void crst_table_free(struct mm_struct *, unsigned long *);
|
||||
void crst_table_free_rcu(struct mm_struct *, unsigned long *);
|
||||
|
||||
unsigned long *page_table_alloc(struct mm_struct *);
|
||||
void page_table_free(struct mm_struct *, unsigned long *);
|
||||
void page_table_free_rcu(struct mm_struct *, unsigned long *);
|
||||
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
|
||||
void page_table_free_rcu(struct mmu_gather *, unsigned long *);
|
||||
void __tlb_remove_table(void *_table);
|
||||
#endif
|
||||
|
||||
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
|
||||
{
|
||||
|
|
|
@ -293,19 +293,6 @@ extern unsigned long VMALLOC_START;
|
|||
* swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
|
||||
*/
|
||||
|
||||
/* Page status table bits for virtualization */
|
||||
#define RCP_ACC_BITS 0xf000000000000000UL
|
||||
#define RCP_FP_BIT 0x0800000000000000UL
|
||||
#define RCP_PCL_BIT 0x0080000000000000UL
|
||||
#define RCP_HR_BIT 0x0040000000000000UL
|
||||
#define RCP_HC_BIT 0x0020000000000000UL
|
||||
#define RCP_GR_BIT 0x0004000000000000UL
|
||||
#define RCP_GC_BIT 0x0002000000000000UL
|
||||
|
||||
/* User dirty / referenced bit for KVM's migration feature */
|
||||
#define KVM_UR_BIT 0x0000800000000000UL
|
||||
#define KVM_UC_BIT 0x0000400000000000UL
|
||||
|
||||
#ifndef __s390x__
|
||||
|
||||
/* Bits in the segment table address-space-control-element */
|
||||
|
@ -325,6 +312,19 @@ extern unsigned long VMALLOC_START;
|
|||
#define _SEGMENT_ENTRY (_SEGMENT_ENTRY_PTL)
|
||||
#define _SEGMENT_ENTRY_EMPTY (_SEGMENT_ENTRY_INV)
|
||||
|
||||
/* Page status table bits for virtualization */
|
||||
#define RCP_ACC_BITS 0xf0000000UL
|
||||
#define RCP_FP_BIT 0x08000000UL
|
||||
#define RCP_PCL_BIT 0x00800000UL
|
||||
#define RCP_HR_BIT 0x00400000UL
|
||||
#define RCP_HC_BIT 0x00200000UL
|
||||
#define RCP_GR_BIT 0x00040000UL
|
||||
#define RCP_GC_BIT 0x00020000UL
|
||||
|
||||
/* User dirty / referenced bit for KVM's migration feature */
|
||||
#define KVM_UR_BIT 0x00008000UL
|
||||
#define KVM_UC_BIT 0x00004000UL
|
||||
|
||||
#else /* __s390x__ */
|
||||
|
||||
/* Bits in the segment/region table address-space-control-element */
|
||||
|
@ -367,6 +367,19 @@ extern unsigned long VMALLOC_START;
|
|||
#define _SEGMENT_ENTRY_LARGE 0x400 /* STE-format control, large page */
|
||||
#define _SEGMENT_ENTRY_CO 0x100 /* change-recording override */
|
||||
|
||||
/* Page status table bits for virtualization */
|
||||
#define RCP_ACC_BITS 0xf000000000000000UL
|
||||
#define RCP_FP_BIT 0x0800000000000000UL
|
||||
#define RCP_PCL_BIT 0x0080000000000000UL
|
||||
#define RCP_HR_BIT 0x0040000000000000UL
|
||||
#define RCP_HC_BIT 0x0020000000000000UL
|
||||
#define RCP_GR_BIT 0x0004000000000000UL
|
||||
#define RCP_GC_BIT 0x0002000000000000UL
|
||||
|
||||
/* User dirty / referenced bit for KVM's migration feature */
|
||||
#define KVM_UR_BIT 0x0000800000000000UL
|
||||
#define KVM_UC_BIT 0x0000400000000000UL
|
||||
|
||||
#endif /* __s390x__ */
|
||||
|
||||
/*
|
||||
|
|
|
@ -139,110 +139,47 @@ struct slib {
|
|||
struct slibe slibe[QDIO_MAX_BUFFERS_PER_Q];
|
||||
} __attribute__ ((packed, aligned(2048)));
|
||||
|
||||
/**
|
||||
* struct sbal_flags - storage block address list flags
|
||||
* @last: last entry
|
||||
* @cont: contiguous storage
|
||||
* @frag: fragmentation
|
||||
*/
|
||||
struct sbal_flags {
|
||||
u8 : 1;
|
||||
u8 last : 1;
|
||||
u8 cont : 1;
|
||||
u8 : 1;
|
||||
u8 frag : 2;
|
||||
u8 : 2;
|
||||
} __attribute__ ((packed));
|
||||
#define SBAL_EFLAGS_LAST_ENTRY 0x40
|
||||
#define SBAL_EFLAGS_CONTIGUOUS 0x20
|
||||
#define SBAL_EFLAGS_FIRST_FRAG 0x04
|
||||
#define SBAL_EFLAGS_MIDDLE_FRAG 0x08
|
||||
#define SBAL_EFLAGS_LAST_FRAG 0x0c
|
||||
#define SBAL_EFLAGS_MASK 0x6f
|
||||
|
||||
#define SBAL_FLAGS_FIRST_FRAG 0x04000000UL
|
||||
#define SBAL_FLAGS_MIDDLE_FRAG 0x08000000UL
|
||||
#define SBAL_FLAGS_LAST_FRAG 0x0c000000UL
|
||||
#define SBAL_FLAGS_LAST_ENTRY 0x40000000UL
|
||||
#define SBAL_FLAGS_CONTIGUOUS 0x20000000UL
|
||||
|
||||
#define SBAL_FLAGS0_DATA_CONTINUATION 0x20UL
|
||||
#define SBAL_SFLAGS0_PCI_REQ 0x40
|
||||
#define SBAL_SFLAGS0_DATA_CONTINUATION 0x20
|
||||
|
||||
/* Awesome OpenFCP extensions */
|
||||
#define SBAL_FLAGS0_TYPE_STATUS 0x00UL
|
||||
#define SBAL_FLAGS0_TYPE_WRITE 0x08UL
|
||||
#define SBAL_FLAGS0_TYPE_READ 0x10UL
|
||||
#define SBAL_FLAGS0_TYPE_WRITE_READ 0x18UL
|
||||
#define SBAL_FLAGS0_MORE_SBALS 0x04UL
|
||||
#define SBAL_FLAGS0_COMMAND 0x02UL
|
||||
#define SBAL_FLAGS0_LAST_SBAL 0x00UL
|
||||
#define SBAL_FLAGS0_ONLY_SBAL SBAL_FLAGS0_COMMAND
|
||||
#define SBAL_FLAGS0_MIDDLE_SBAL SBAL_FLAGS0_MORE_SBALS
|
||||
#define SBAL_FLAGS0_FIRST_SBAL SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND
|
||||
#define SBAL_FLAGS0_PCI 0x40
|
||||
|
||||
/**
|
||||
* struct sbal_sbalf_0 - sbal flags for sbale 0
|
||||
* @pci: PCI indicator
|
||||
* @cont: data continuation
|
||||
* @sbtype: storage-block type (FCP)
|
||||
*/
|
||||
struct sbal_sbalf_0 {
|
||||
u8 : 1;
|
||||
u8 pci : 1;
|
||||
u8 cont : 1;
|
||||
u8 sbtype : 2;
|
||||
u8 : 3;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
/**
|
||||
* struct sbal_sbalf_1 - sbal flags for sbale 1
|
||||
* @key: storage key
|
||||
*/
|
||||
struct sbal_sbalf_1 {
|
||||
u8 : 4;
|
||||
u8 key : 4;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
/**
|
||||
* struct sbal_sbalf_14 - sbal flags for sbale 14
|
||||
* @erridx: error index
|
||||
*/
|
||||
struct sbal_sbalf_14 {
|
||||
u8 : 4;
|
||||
u8 erridx : 4;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
/**
|
||||
* struct sbal_sbalf_15 - sbal flags for sbale 15
|
||||
* @reason: reason for error state
|
||||
*/
|
||||
struct sbal_sbalf_15 {
|
||||
u8 reason;
|
||||
} __attribute__ ((packed));
|
||||
|
||||
/**
|
||||
* union sbal_sbalf - storage block address list flags
|
||||
* @i0: sbalf0
|
||||
* @i1: sbalf1
|
||||
* @i14: sbalf14
|
||||
* @i15: sblaf15
|
||||
* @value: raw value
|
||||
*/
|
||||
union sbal_sbalf {
|
||||
struct sbal_sbalf_0 i0;
|
||||
struct sbal_sbalf_1 i1;
|
||||
struct sbal_sbalf_14 i14;
|
||||
struct sbal_sbalf_15 i15;
|
||||
u8 value;
|
||||
};
|
||||
#define SBAL_SFLAGS0_TYPE_STATUS 0x00
|
||||
#define SBAL_SFLAGS0_TYPE_WRITE 0x08
|
||||
#define SBAL_SFLAGS0_TYPE_READ 0x10
|
||||
#define SBAL_SFLAGS0_TYPE_WRITE_READ 0x18
|
||||
#define SBAL_SFLAGS0_MORE_SBALS 0x04
|
||||
#define SBAL_SFLAGS0_COMMAND 0x02
|
||||
#define SBAL_SFLAGS0_LAST_SBAL 0x00
|
||||
#define SBAL_SFLAGS0_ONLY_SBAL SBAL_SFLAGS0_COMMAND
|
||||
#define SBAL_SFLAGS0_MIDDLE_SBAL SBAL_SFLAGS0_MORE_SBALS
|
||||
#define SBAL_SFLAGS0_FIRST_SBAL (SBAL_SFLAGS0_MORE_SBALS | SBAL_SFLAGS0_COMMAND)
|
||||
|
||||
/**
|
||||
* struct qdio_buffer_element - SBAL entry
|
||||
* @flags: flags
|
||||
* @eflags: SBAL entry flags
|
||||
* @scount: SBAL count
|
||||
* @sflags: whole SBAL flags
|
||||
* @length: length
|
||||
* @addr: address
|
||||
*/
|
||||
struct qdio_buffer_element {
|
||||
u32 flags;
|
||||
u8 eflags;
|
||||
/* private: */
|
||||
u8 res1;
|
||||
/* public: */
|
||||
u8 scount;
|
||||
u8 sflags;
|
||||
u32 length;
|
||||
#ifdef CONFIG_32BIT
|
||||
/* private: */
|
||||
void *reserved;
|
||||
void *res2;
|
||||
/* public: */
|
||||
#endif
|
||||
void *addr;
|
||||
|
|
|
@ -26,67 +26,60 @@
|
|||
#include <linux/swap.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/tlbflush.h>
|
||||
|
||||
struct mmu_gather {
|
||||
struct mm_struct *mm;
|
||||
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
|
||||
struct mmu_table_batch *batch;
|
||||
#endif
|
||||
unsigned int fullmm;
|
||||
unsigned int nr_ptes;
|
||||
unsigned int nr_pxds;
|
||||
unsigned int max;
|
||||
void **array;
|
||||
void *local[8];
|
||||
unsigned int need_flush;
|
||||
};
|
||||
|
||||
static inline void __tlb_alloc_page(struct mmu_gather *tlb)
|
||||
{
|
||||
unsigned long addr = __get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
|
||||
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
|
||||
struct mmu_table_batch {
|
||||
struct rcu_head rcu;
|
||||
unsigned int nr;
|
||||
void *tables[0];
|
||||
};
|
||||
|
||||
if (addr) {
|
||||
tlb->array = (void *) addr;
|
||||
tlb->max = PAGE_SIZE / sizeof(void *);
|
||||
}
|
||||
}
|
||||
#define MAX_TABLE_BATCH \
|
||||
((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
|
||||
|
||||
extern void tlb_table_flush(struct mmu_gather *tlb);
|
||||
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
|
||||
#endif
|
||||
|
||||
static inline void tlb_gather_mmu(struct mmu_gather *tlb,
|
||||
struct mm_struct *mm,
|
||||
unsigned int full_mm_flush)
|
||||
{
|
||||
tlb->mm = mm;
|
||||
tlb->max = ARRAY_SIZE(tlb->local);
|
||||
tlb->array = tlb->local;
|
||||
tlb->fullmm = full_mm_flush;
|
||||
tlb->need_flush = 0;
|
||||
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
|
||||
tlb->batch = NULL;
|
||||
#endif
|
||||
if (tlb->fullmm)
|
||||
__tlb_flush_mm(mm);
|
||||
else
|
||||
__tlb_alloc_page(tlb);
|
||||
tlb->nr_ptes = 0;
|
||||
tlb->nr_pxds = tlb->max;
|
||||
}
|
||||
|
||||
static inline void tlb_flush_mmu(struct mmu_gather *tlb)
|
||||
{
|
||||
if (!tlb->fullmm && (tlb->nr_ptes > 0 || tlb->nr_pxds < tlb->max))
|
||||
__tlb_flush_mm(tlb->mm);
|
||||
while (tlb->nr_ptes > 0)
|
||||
page_table_free_rcu(tlb->mm, tlb->array[--tlb->nr_ptes]);
|
||||
while (tlb->nr_pxds < tlb->max)
|
||||
crst_table_free_rcu(tlb->mm, tlb->array[tlb->nr_pxds++]);
|
||||
if (!tlb->need_flush)
|
||||
return;
|
||||
tlb->need_flush = 0;
|
||||
__tlb_flush_mm(tlb->mm);
|
||||
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
|
||||
tlb_table_flush(tlb);
|
||||
#endif
|
||||
}
|
||||
|
||||
static inline void tlb_finish_mmu(struct mmu_gather *tlb,
|
||||
unsigned long start, unsigned long end)
|
||||
{
|
||||
tlb_flush_mmu(tlb);
|
||||
|
||||
rcu_table_freelist_finish();
|
||||
|
||||
/* keep the page table cache within bounds */
|
||||
check_pgt_cache();
|
||||
|
||||
if (tlb->array != tlb->local)
|
||||
free_pages((unsigned long) tlb->array, 0);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -112,12 +105,11 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
|
|||
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
|
||||
unsigned long address)
|
||||
{
|
||||
if (!tlb->fullmm) {
|
||||
tlb->array[tlb->nr_ptes++] = pte;
|
||||
if (tlb->nr_ptes >= tlb->nr_pxds)
|
||||
tlb_flush_mmu(tlb);
|
||||
} else
|
||||
page_table_free(tlb->mm, (unsigned long *) pte);
|
||||
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
|
||||
if (!tlb->fullmm)
|
||||
return page_table_free_rcu(tlb, (unsigned long *) pte);
|
||||
#endif
|
||||
page_table_free(tlb->mm, (unsigned long *) pte);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -133,12 +125,11 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
|
|||
#ifdef __s390x__
|
||||
if (tlb->mm->context.asce_limit <= (1UL << 31))
|
||||
return;
|
||||
if (!tlb->fullmm) {
|
||||
tlb->array[--tlb->nr_pxds] = pmd;
|
||||
if (tlb->nr_ptes >= tlb->nr_pxds)
|
||||
tlb_flush_mmu(tlb);
|
||||
} else
|
||||
crst_table_free(tlb->mm, (unsigned long *) pmd);
|
||||
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
|
||||
if (!tlb->fullmm)
|
||||
return tlb_remove_table(tlb, pmd);
|
||||
#endif
|
||||
crst_table_free(tlb->mm, (unsigned long *) pmd);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -155,12 +146,11 @@ static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
|
|||
#ifdef __s390x__
|
||||
if (tlb->mm->context.asce_limit <= (1UL << 42))
|
||||
return;
|
||||
if (!tlb->fullmm) {
|
||||
tlb->array[--tlb->nr_pxds] = pud;
|
||||
if (tlb->nr_ptes >= tlb->nr_pxds)
|
||||
tlb_flush_mmu(tlb);
|
||||
} else
|
||||
crst_table_free(tlb->mm, (unsigned long *) pud);
|
||||
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
|
||||
if (!tlb->fullmm)
|
||||
return tlb_remove_table(tlb, pud);
|
||||
#endif
|
||||
crst_table_free(tlb->mm, (unsigned long *) pud);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
|
|
@ -731,6 +731,7 @@ static int __init kvm_s390_init(void)
|
|||
}
|
||||
memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
|
||||
facilities[0] &= 0xff00fff3f47c0000ULL;
|
||||
facilities[1] &= 0x201c000000000000ULL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -93,4 +93,6 @@ sie_err:
|
|||
|
||||
.section __ex_table,"a"
|
||||
.quad sie_inst,sie_err
|
||||
.quad sie_exit,sie_err
|
||||
.quad sie_reenter,sie_err
|
||||
.previous
|
||||
|
|
|
@ -24,94 +24,12 @@
|
|||
#include <asm/tlbflush.h>
|
||||
#include <asm/mmu_context.h>
|
||||
|
||||
struct rcu_table_freelist {
|
||||
struct rcu_head rcu;
|
||||
struct mm_struct *mm;
|
||||
unsigned int pgt_index;
|
||||
unsigned int crst_index;
|
||||
unsigned long *table[0];
|
||||
};
|
||||
|
||||
#define RCU_FREELIST_SIZE \
|
||||
((PAGE_SIZE - sizeof(struct rcu_table_freelist)) \
|
||||
/ sizeof(unsigned long))
|
||||
|
||||
static DEFINE_PER_CPU(struct rcu_table_freelist *, rcu_table_freelist);
|
||||
|
||||
static void __page_table_free(struct mm_struct *mm, unsigned long *table);
|
||||
|
||||
static struct rcu_table_freelist *rcu_table_freelist_get(struct mm_struct *mm)
|
||||
{
|
||||
struct rcu_table_freelist **batchp = &__get_cpu_var(rcu_table_freelist);
|
||||
struct rcu_table_freelist *batch = *batchp;
|
||||
|
||||
if (batch)
|
||||
return batch;
|
||||
batch = (struct rcu_table_freelist *) __get_free_page(GFP_ATOMIC);
|
||||
if (batch) {
|
||||
batch->mm = mm;
|
||||
batch->pgt_index = 0;
|
||||
batch->crst_index = RCU_FREELIST_SIZE;
|
||||
*batchp = batch;
|
||||
}
|
||||
return batch;
|
||||
}
|
||||
|
||||
static void rcu_table_freelist_callback(struct rcu_head *head)
|
||||
{
|
||||
struct rcu_table_freelist *batch =
|
||||
container_of(head, struct rcu_table_freelist, rcu);
|
||||
|
||||
while (batch->pgt_index > 0)
|
||||
__page_table_free(batch->mm, batch->table[--batch->pgt_index]);
|
||||
while (batch->crst_index < RCU_FREELIST_SIZE)
|
||||
crst_table_free(batch->mm, batch->table[batch->crst_index++]);
|
||||
free_page((unsigned long) batch);
|
||||
}
|
||||
|
||||
void rcu_table_freelist_finish(void)
|
||||
{
|
||||
struct rcu_table_freelist **batchp = &get_cpu_var(rcu_table_freelist);
|
||||
struct rcu_table_freelist *batch = *batchp;
|
||||
|
||||
if (!batch)
|
||||
goto out;
|
||||
call_rcu(&batch->rcu, rcu_table_freelist_callback);
|
||||
*batchp = NULL;
|
||||
out:
|
||||
put_cpu_var(rcu_table_freelist);
|
||||
}
|
||||
|
||||
static void smp_sync(void *arg)
|
||||
{
|
||||
}
|
||||
|
||||
#ifndef CONFIG_64BIT
|
||||
#define ALLOC_ORDER 1
|
||||
#define TABLES_PER_PAGE 4
|
||||
#define FRAG_MASK 15UL
|
||||
#define SECOND_HALVES 10UL
|
||||
|
||||
void clear_table_pgstes(unsigned long *table)
|
||||
{
|
||||
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
|
||||
memset(table + 256, 0, PAGE_SIZE/4);
|
||||
clear_table(table + 512, _PAGE_TYPE_EMPTY, PAGE_SIZE/4);
|
||||
memset(table + 768, 0, PAGE_SIZE/4);
|
||||
}
|
||||
|
||||
#define FRAG_MASK 0x0f
|
||||
#else
|
||||
#define ALLOC_ORDER 2
|
||||
#define TABLES_PER_PAGE 2
|
||||
#define FRAG_MASK 3UL
|
||||
#define SECOND_HALVES 2UL
|
||||
|
||||
void clear_table_pgstes(unsigned long *table)
|
||||
{
|
||||
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
|
||||
memset(table + 256, 0, PAGE_SIZE/2);
|
||||
}
|
||||
|
||||
#define FRAG_MASK 0x03
|
||||
#endif
|
||||
|
||||
unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
|
||||
|
@ -140,29 +58,6 @@ void crst_table_free(struct mm_struct *mm, unsigned long *table)
|
|||
free_pages((unsigned long) table, ALLOC_ORDER);
|
||||
}
|
||||
|
||||
void crst_table_free_rcu(struct mm_struct *mm, unsigned long *table)
|
||||
{
|
||||
struct rcu_table_freelist *batch;
|
||||
|
||||
preempt_disable();
|
||||
if (atomic_read(&mm->mm_users) < 2 &&
|
||||
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
|
||||
crst_table_free(mm, table);
|
||||
goto out;
|
||||
}
|
||||
batch = rcu_table_freelist_get(mm);
|
||||
if (!batch) {
|
||||
smp_call_function(smp_sync, NULL, 1);
|
||||
crst_table_free(mm, table);
|
||||
goto out;
|
||||
}
|
||||
batch->table[--batch->crst_index] = table;
|
||||
if (batch->pgt_index >= batch->crst_index)
|
||||
rcu_table_freelist_finish();
|
||||
out:
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
|
||||
{
|
||||
|
@ -238,124 +133,175 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
|
|||
}
|
||||
#endif
|
||||
|
||||
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
|
||||
{
|
||||
unsigned int old, new;
|
||||
|
||||
do {
|
||||
old = atomic_read(v);
|
||||
new = old ^ bits;
|
||||
} while (atomic_cmpxchg(v, old, new) != old);
|
||||
return new;
|
||||
}
|
||||
|
||||
/*
|
||||
* page table entry allocation/free routines.
|
||||
*/
|
||||
#ifdef CONFIG_PGSTE
|
||||
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
|
||||
{
|
||||
struct page *page;
|
||||
unsigned long *table;
|
||||
|
||||
page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
|
||||
if (!page)
|
||||
return NULL;
|
||||
pgtable_page_ctor(page);
|
||||
atomic_set(&page->_mapcount, 3);
|
||||
table = (unsigned long *) page_to_phys(page);
|
||||
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
|
||||
clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
|
||||
return table;
|
||||
}
|
||||
|
||||
static inline void page_table_free_pgste(unsigned long *table)
|
||||
{
|
||||
struct page *page;
|
||||
|
||||
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
|
||||
pgtable_page_ctor(page);
|
||||
atomic_set(&page->_mapcount, -1);
|
||||
__free_page(page);
|
||||
}
|
||||
#endif
|
||||
|
||||
unsigned long *page_table_alloc(struct mm_struct *mm)
|
||||
{
|
||||
struct page *page;
|
||||
unsigned long *table;
|
||||
unsigned long bits;
|
||||
unsigned int mask, bit;
|
||||
|
||||
bits = (mm->context.has_pgste) ? 3UL : 1UL;
|
||||
#ifdef CONFIG_PGSTE
|
||||
if (mm_has_pgste(mm))
|
||||
return page_table_alloc_pgste(mm);
|
||||
#endif
|
||||
/* Allocate fragments of a 4K page as 1K/2K page table */
|
||||
spin_lock_bh(&mm->context.list_lock);
|
||||
page = NULL;
|
||||
mask = FRAG_MASK;
|
||||
if (!list_empty(&mm->context.pgtable_list)) {
|
||||
page = list_first_entry(&mm->context.pgtable_list,
|
||||
struct page, lru);
|
||||
if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
|
||||
page = NULL;
|
||||
table = (unsigned long *) page_to_phys(page);
|
||||
mask = atomic_read(&page->_mapcount);
|
||||
mask = mask | (mask >> 4);
|
||||
}
|
||||
if (!page) {
|
||||
if ((mask & FRAG_MASK) == FRAG_MASK) {
|
||||
spin_unlock_bh(&mm->context.list_lock);
|
||||
page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
|
||||
if (!page)
|
||||
return NULL;
|
||||
pgtable_page_ctor(page);
|
||||
page->flags &= ~FRAG_MASK;
|
||||
atomic_set(&page->_mapcount, 1);
|
||||
table = (unsigned long *) page_to_phys(page);
|
||||
if (mm->context.has_pgste)
|
||||
clear_table_pgstes(table);
|
||||
else
|
||||
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
|
||||
clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
|
||||
spin_lock_bh(&mm->context.list_lock);
|
||||
list_add(&page->lru, &mm->context.pgtable_list);
|
||||
} else {
|
||||
for (bit = 1; mask & bit; bit <<= 1)
|
||||
table += PTRS_PER_PTE;
|
||||
mask = atomic_xor_bits(&page->_mapcount, bit);
|
||||
if ((mask & FRAG_MASK) == FRAG_MASK)
|
||||
list_del(&page->lru);
|
||||
}
|
||||
table = (unsigned long *) page_to_phys(page);
|
||||
while (page->flags & bits) {
|
||||
table += 256;
|
||||
bits <<= 1;
|
||||
}
|
||||
page->flags |= bits;
|
||||
if ((page->flags & FRAG_MASK) == ((1UL << TABLES_PER_PAGE) - 1))
|
||||
list_move_tail(&page->lru, &mm->context.pgtable_list);
|
||||
spin_unlock_bh(&mm->context.list_lock);
|
||||
return table;
|
||||
}
|
||||
|
||||
static void __page_table_free(struct mm_struct *mm, unsigned long *table)
|
||||
{
|
||||
struct page *page;
|
||||
unsigned long bits;
|
||||
|
||||
bits = ((unsigned long) table) & 15;
|
||||
table = (unsigned long *)(((unsigned long) table) ^ bits);
|
||||
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
|
||||
page->flags ^= bits;
|
||||
if (!(page->flags & FRAG_MASK)) {
|
||||
pgtable_page_dtor(page);
|
||||
__free_page(page);
|
||||
}
|
||||
}
|
||||
|
||||
void page_table_free(struct mm_struct *mm, unsigned long *table)
|
||||
{
|
||||
struct page *page;
|
||||
unsigned long bits;
|
||||
unsigned int bit, mask;
|
||||
|
||||
bits = (mm->context.has_pgste) ? 3UL : 1UL;
|
||||
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
|
||||
#ifdef CONFIG_PGSTE
|
||||
if (mm_has_pgste(mm))
|
||||
return page_table_free_pgste(table);
|
||||
#endif
|
||||
/* Free 1K/2K page table fragment of a 4K page */
|
||||
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
|
||||
bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
|
||||
spin_lock_bh(&mm->context.list_lock);
|
||||
page->flags ^= bits;
|
||||
if (page->flags & FRAG_MASK) {
|
||||
/* Page now has some free pgtable fragments. */
|
||||
if (!list_empty(&page->lru))
|
||||
list_move(&page->lru, &mm->context.pgtable_list);
|
||||
page = NULL;
|
||||
} else
|
||||
/* All fragments of the 4K page have been freed. */
|
||||
if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
|
||||
list_del(&page->lru);
|
||||
mask = atomic_xor_bits(&page->_mapcount, bit);
|
||||
if (mask & FRAG_MASK)
|
||||
list_add(&page->lru, &mm->context.pgtable_list);
|
||||
spin_unlock_bh(&mm->context.list_lock);
|
||||
if (page) {
|
||||
if (mask == 0) {
|
||||
pgtable_page_dtor(page);
|
||||
atomic_set(&page->_mapcount, -1);
|
||||
__free_page(page);
|
||||
}
|
||||
}
|
||||
|
||||
void page_table_free_rcu(struct mm_struct *mm, unsigned long *table)
|
||||
{
|
||||
struct rcu_table_freelist *batch;
|
||||
struct page *page;
|
||||
unsigned long bits;
|
||||
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
|
||||
|
||||
preempt_disable();
|
||||
if (atomic_read(&mm->mm_users) < 2 &&
|
||||
cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) {
|
||||
page_table_free(mm, table);
|
||||
goto out;
|
||||
static void __page_table_free_rcu(void *table, unsigned bit)
|
||||
{
|
||||
struct page *page;
|
||||
|
||||
#ifdef CONFIG_PGSTE
|
||||
if (bit == FRAG_MASK)
|
||||
return page_table_free_pgste(table);
|
||||
#endif
|
||||
/* Free 1K/2K page table fragment of a 4K page */
|
||||
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
|
||||
if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
|
||||
pgtable_page_dtor(page);
|
||||
atomic_set(&page->_mapcount, -1);
|
||||
__free_page(page);
|
||||
}
|
||||
batch = rcu_table_freelist_get(mm);
|
||||
if (!batch) {
|
||||
smp_call_function(smp_sync, NULL, 1);
|
||||
page_table_free(mm, table);
|
||||
goto out;
|
||||
}
|
||||
|
||||
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
|
||||
{
|
||||
struct mm_struct *mm;
|
||||
struct page *page;
|
||||
unsigned int bit, mask;
|
||||
|
||||
mm = tlb->mm;
|
||||
#ifdef CONFIG_PGSTE
|
||||
if (mm_has_pgste(mm)) {
|
||||
table = (unsigned long *) (__pa(table) | FRAG_MASK);
|
||||
tlb_remove_table(tlb, table);
|
||||
return;
|
||||
}
|
||||
bits = (mm->context.has_pgste) ? 3UL : 1UL;
|
||||
bits <<= (__pa(table) & (PAGE_SIZE - 1)) / 256 / sizeof(unsigned long);
|
||||
#endif
|
||||
bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
|
||||
page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
|
||||
spin_lock_bh(&mm->context.list_lock);
|
||||
/* Delayed freeing with rcu prevents reuse of pgtable fragments */
|
||||
list_del_init(&page->lru);
|
||||
if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
|
||||
list_del(&page->lru);
|
||||
mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
|
||||
if (mask & FRAG_MASK)
|
||||
list_add_tail(&page->lru, &mm->context.pgtable_list);
|
||||
spin_unlock_bh(&mm->context.list_lock);
|
||||
table = (unsigned long *)(((unsigned long) table) | bits);
|
||||
batch->table[batch->pgt_index++] = table;
|
||||
if (batch->pgt_index >= batch->crst_index)
|
||||
rcu_table_freelist_finish();
|
||||
out:
|
||||
preempt_enable();
|
||||
table = (unsigned long *) (__pa(table) | (bit << 4));
|
||||
tlb_remove_table(tlb, table);
|
||||
}
|
||||
|
||||
void __tlb_remove_table(void *_table)
|
||||
{
|
||||
void *table = (void *)((unsigned long) _table & PAGE_MASK);
|
||||
unsigned type = (unsigned long) _table & ~PAGE_MASK;
|
||||
|
||||
if (type)
|
||||
__page_table_free_rcu(table, type);
|
||||
else
|
||||
free_pages((unsigned long) table, ALLOC_ORDER);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* switch on pgstes for its userspace process (for kvm)
|
||||
*/
|
||||
|
@ -369,7 +315,7 @@ int s390_enable_sie(void)
|
|||
return -EINVAL;
|
||||
|
||||
/* Do we have pgstes? if yes, we are done */
|
||||
if (tsk->mm->context.has_pgste)
|
||||
if (mm_has_pgste(tsk->mm))
|
||||
return 0;
|
||||
|
||||
/* lets check if we are allowed to replace the mm */
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include <asm/proto.h>
|
||||
#include <asm/iommu.h>
|
||||
#include <asm/gart.h>
|
||||
#include <asm/dma.h>
|
||||
#include <asm/amd_iommu_proto.h>
|
||||
#include <asm/amd_iommu_types.h>
|
||||
#include <asm/amd_iommu.h>
|
||||
|
@ -154,6 +155,10 @@ static int iommu_init_device(struct device *dev)
|
|||
pdev = pci_get_bus_and_slot(PCI_BUS(alias), alias & 0xff);
|
||||
if (pdev)
|
||||
dev_data->alias = &pdev->dev;
|
||||
else {
|
||||
kfree(dev_data);
|
||||
return -ENOTSUPP;
|
||||
}
|
||||
|
||||
atomic_set(&dev_data->bind, 0);
|
||||
|
||||
|
@ -163,6 +168,20 @@ static int iommu_init_device(struct device *dev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void iommu_ignore_device(struct device *dev)
|
||||
{
|
||||
u16 devid, alias;
|
||||
|
||||
devid = get_device_id(dev);
|
||||
alias = amd_iommu_alias_table[devid];
|
||||
|
||||
memset(&amd_iommu_dev_table[devid], 0, sizeof(struct dev_table_entry));
|
||||
memset(&amd_iommu_dev_table[alias], 0, sizeof(struct dev_table_entry));
|
||||
|
||||
amd_iommu_rlookup_table[devid] = NULL;
|
||||
amd_iommu_rlookup_table[alias] = NULL;
|
||||
}
|
||||
|
||||
static void iommu_uninit_device(struct device *dev)
|
||||
{
|
||||
kfree(dev->archdata.iommu);
|
||||
|
@ -192,7 +211,9 @@ int __init amd_iommu_init_devices(void)
|
|||
continue;
|
||||
|
||||
ret = iommu_init_device(&pdev->dev);
|
||||
if (ret)
|
||||
if (ret == -ENOTSUPP)
|
||||
iommu_ignore_device(&pdev->dev);
|
||||
else if (ret)
|
||||
goto out_free;
|
||||
}
|
||||
|
||||
|
@ -2383,6 +2404,23 @@ static struct dma_map_ops amd_iommu_dma_ops = {
|
|||
.dma_supported = amd_iommu_dma_supported,
|
||||
};
|
||||
|
||||
static unsigned device_dma_ops_init(void)
|
||||
{
|
||||
struct pci_dev *pdev = NULL;
|
||||
unsigned unhandled = 0;
|
||||
|
||||
for_each_pci_dev(pdev) {
|
||||
if (!check_device(&pdev->dev)) {
|
||||
unhandled += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
pdev->dev.archdata.dma_ops = &amd_iommu_dma_ops;
|
||||
}
|
||||
|
||||
return unhandled;
|
||||
}
|
||||
|
||||
/*
|
||||
* The function which clues the AMD IOMMU driver into dma_ops.
|
||||
*/
|
||||
|
@ -2395,7 +2433,7 @@ void __init amd_iommu_init_api(void)
|
|||
int __init amd_iommu_init_dma_ops(void)
|
||||
{
|
||||
struct amd_iommu *iommu;
|
||||
int ret;
|
||||
int ret, unhandled;
|
||||
|
||||
/*
|
||||
* first allocate a default protection domain for every IOMMU we
|
||||
|
@ -2421,7 +2459,11 @@ int __init amd_iommu_init_dma_ops(void)
|
|||
swiotlb = 0;
|
||||
|
||||
/* Make the driver finally visible to the drivers */
|
||||
dma_ops = &amd_iommu_dma_ops;
|
||||
unhandled = device_dma_ops_init();
|
||||
if (unhandled && max_pfn > MAX_DMA32_PFN) {
|
||||
/* There are unhandled devices - initialize swiotlb for them */
|
||||
swiotlb = 1;
|
||||
}
|
||||
|
||||
amd_iommu_stats_init();
|
||||
|
||||
|
|
|
@ -731,8 +731,8 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
|
|||
{
|
||||
u8 *p = (u8 *)h;
|
||||
u8 *end = p, flags = 0;
|
||||
u16 dev_i, devid = 0, devid_start = 0, devid_to = 0;
|
||||
u32 ext_flags = 0;
|
||||
u16 devid = 0, devid_start = 0, devid_to = 0;
|
||||
u32 dev_i, ext_flags = 0;
|
||||
bool alias = false;
|
||||
struct ivhd_entry *e;
|
||||
|
||||
|
@ -887,7 +887,7 @@ static void __init init_iommu_from_acpi(struct amd_iommu *iommu,
|
|||
/* Initializes the device->iommu mapping for the driver */
|
||||
static int __init init_iommu_devices(struct amd_iommu *iommu)
|
||||
{
|
||||
u16 i;
|
||||
u32 i;
|
||||
|
||||
for (i = iommu->first_device; i <= iommu->last_device; ++i)
|
||||
set_iommu_for_device(iommu, i);
|
||||
|
@ -1177,7 +1177,7 @@ static int __init init_memory_definitions(struct acpi_table_header *table)
|
|||
*/
|
||||
static void init_device_table(void)
|
||||
{
|
||||
u16 devid;
|
||||
u32 devid;
|
||||
|
||||
for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) {
|
||||
set_dev_entry_bit(devid, DEV_ENTRY_VALID);
|
||||
|
|
|
@@ -47,38 +47,40 @@
#define DstDI (5<<1) /* Destination is in ES:(E)DI */
#define DstMem64 (6<<1) /* 64bit memory operand */
#define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */
#define DstMask (7<<1)
#define DstDX (8<<1) /* Destination is in DX register */
#define DstMask (0xf<<1)
/* Source operand type. */
#define SrcNone (0<<4) /* No source operand. */
#define SrcReg (1<<4) /* Register operand. */
#define SrcMem (2<<4) /* Memory operand. */
#define SrcMem16 (3<<4) /* Memory operand (16-bit). */
#define SrcMem32 (4<<4) /* Memory operand (32-bit). */
#define SrcImm (5<<4) /* Immediate operand. */
#define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */
#define SrcOne (7<<4) /* Implied '1' */
#define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */
#define SrcImmU (9<<4) /* Immediate operand, unsigned */
#define SrcSI (0xa<<4) /* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<4) /* Source is immediate far address */
#define SrcMemFAddr (0xc<<4) /* Source is far address in memory */
#define SrcAcc (0xd<<4) /* Source Accumulator */
#define SrcImmU16 (0xe<<4) /* Immediate operand, unsigned, 16 bits */
#define SrcMask (0xf<<4)
#define SrcNone (0<<5) /* No source operand. */
#define SrcReg (1<<5) /* Register operand. */
#define SrcMem (2<<5) /* Memory operand. */
#define SrcMem16 (3<<5) /* Memory operand (16-bit). */
#define SrcMem32 (4<<5) /* Memory operand (32-bit). */
#define SrcImm (5<<5) /* Immediate operand. */
#define SrcImmByte (6<<5) /* 8-bit sign-extended immediate operand. */
#define SrcOne (7<<5) /* Implied '1' */
#define SrcImmUByte (8<<5) /* 8-bit unsigned immediate operand. */
#define SrcImmU (9<<5) /* Immediate operand, unsigned */
#define SrcSI (0xa<<5) /* Source is in the DS:RSI */
#define SrcImmFAddr (0xb<<5) /* Source is immediate far address */
#define SrcMemFAddr (0xc<<5) /* Source is far address in memory */
#define SrcAcc (0xd<<5) /* Source Accumulator */
#define SrcImmU16 (0xe<<5) /* Immediate operand, unsigned, 16 bits */
#define SrcDX (0xf<<5) /* Source is in DX register */
#define SrcMask (0xf<<5)
/* Generic ModRM decode. */
#define ModRM (1<<8)
#define ModRM (1<<9)
/* Destination is only written; never read. */
#define Mov (1<<9)
#define BitOp (1<<10)
#define MemAbs (1<<11) /* Memory operand is absolute displacement */
#define String (1<<12) /* String instruction (rep capable) */
#define Stack (1<<13) /* Stack instruction (push/pop) */
#define GroupMask (7<<14) /* Opcode uses one of the group mechanisms */
#define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (2<<14) /* Alternate decoding of mod == 3 */
#define Prefix (3<<14) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<14) /* Opcode extension in ModRM r/m if mod == 3 */
#define Sse (1<<17) /* SSE Vector instruction */
#define Mov (1<<10)
#define BitOp (1<<11)
#define MemAbs (1<<12) /* Memory operand is absolute displacement */
#define String (1<<13) /* String instruction (rep capable) */
#define Stack (1<<14) /* Stack instruction (push/pop) */
#define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */
#define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */
#define GroupDual (2<<15) /* Alternate decoding of mod == 3 */
#define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
#define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
#define Sse (1<<18) /* SSE Vector instruction */
/* Misc flags */
#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
#define VendorSpecific (1<<22) /* Vendor specific instruction */

@@ -3154,8 +3156,8 @@ static struct opcode opcode_table[256] = {
	I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
	I(SrcImmByte | Mov | Stack, em_push),
	I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
	D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */
	D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */
	D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */
	D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */
	/* 0x70 - 0x7F */
	X16(D(SrcImmByte)),
	/* 0x80 - 0x87 */

@@ -3212,8 +3214,8 @@ static struct opcode opcode_table[256] = {
	/* 0xE8 - 0xEF */
	D(SrcImm | Stack), D(SrcImm | ImplicitOps),
	D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps),
	D2bvIP(SrcNone | DstAcc, in, check_perm_in),
	D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out),
	D2bvIP(SrcDX | DstAcc, in, check_perm_in),
	D2bvIP(SrcAcc | DstDX, out, check_perm_out),
	/* 0xF0 - 0xF7 */
	N, DI(ImplicitOps, icebp), N, N,
	DI(ImplicitOps | Priv, hlt), D(ImplicitOps),

@@ -3613,6 +3615,12 @@ done_prefixes:
		memop.bytes = c->op_bytes + 2;
		goto srcmem_common;
		break;
	case SrcDX:
		c->src.type = OP_REG;
		c->src.bytes = 2;
		c->src.addr.reg = &c->regs[VCPU_REGS_RDX];
		fetch_register_operand(&c->src);
		break;
	}

	if (rc != X86EMUL_CONTINUE)

@@ -3682,6 +3690,12 @@ done_prefixes:
		c->dst.addr.mem.seg = VCPU_SREG_ES;
		c->dst.val = 0;
		break;
	case DstDX:
		c->dst.type = OP_REG;
		c->dst.bytes = 2;
		c->dst.addr.reg = &c->regs[VCPU_REGS_RDX];
		fetch_register_operand(&c->dst);
		break;
	case ImplicitOps:
		/* Special instructions do their own operand decoding. */
	default:

@@ -4027,7 +4041,6 @@ special_insn:
		break;
	case 0xec: /* in al,dx */
	case 0xed: /* in (e/r)ax,dx */
		c->src.val = c->regs[VCPU_REGS_RDX];
	do_io_in:
		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
				     &c->dst.val))

@@ -4035,7 +4048,6 @@ special_insn:
		break;
	case 0xee: /* out dx,al */
	case 0xef: /* out dx,(e/r)ax */
		c->dst.val = c->regs[VCPU_REGS_RDX];
	do_io_out:
		ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val,
				      &c->src.val, 1);
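A quick way to read the renumbering above: the destination selector gains a ninth value (DstDX = 8<<1), so its mask has to widen from 3 to 4 bits, which pushes the source field from bit 4 to bit 5 and every higher flag up by one. The following is a hedged sketch in standalone C, with the two new selector values copied from the hunk, not emulator code.

#include <stdio.h>

/* Sketch only: the two new operand types from the hunk above, packed the
 * same way the decode flags are packed. */
#define DstDX   (8u << 1)	/* needs 4 destination bits -> DstMask 0xf<<1 */
#define DstMask (0xfu << 1)
#define SrcDX   (0xfu << 5)	/* source field now starts at bit 5 */
#define SrcMask (0xfu << 5)

int main(void)
{
	unsigned int flags = DstDX | SrcDX;

	printf("dst selector = %u\n", (flags & DstMask) >> 1);	/* 8  */
	printf("src selector = %u\n", (flags & SrcMask) >> 5);	/* 15 */
	return 0;
}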
@@ -87,32 +87,20 @@ config GPIO_IT8761E
	  Say yes here to support GPIO functionality of IT8761E super I/O chip.

config GPIO_EXYNOS4
	bool "Samsung Exynos4 GPIO library support"
	default y if CPU_EXYNOS4210
	depends on ARM
	help
	  Say yes here to support Samsung Exynos4 series SoCs GPIO library
	def_bool y
	depends on CPU_EXYNOS4210

config GPIO_PLAT_SAMSUNG
	bool "Samsung SoCs GPIO library support"
	default y if SAMSUNG_GPIOLIB_4BIT
	depends on ARM
	help
	  Say yes here to support Samsung SoCs GPIO library
	def_bool y
	depends on SAMSUNG_GPIOLIB_4BIT

config GPIO_S5PC100
	bool "Samsung S5PC100 GPIO library support"
	default y if CPU_S5PC100
	depends on ARM
	help
	  Say yes here to support Samsung S5PC100 SoCs GPIO library
	def_bool y
	depends on CPU_S5PC100

config GPIO_S5PV210
	bool "Samsung S5PV210/S5PC110 GPIO library support"
	default y if CPU_S5PV210
	depends on ARM
	help
	  Say yes here to support Samsung S5PV210/S5PC110 SoCs GPIO library
	def_bool y
	depends on CPU_S5PV210

config GPIO_PL061
	bool "PrimeCell PL061 GPIO support"
@@ -21,16 +21,37 @@
#include <plat/gpio-cfg.h>
#include <plat/gpio-cfg-helpers.h>

int s3c_gpio_setpull_exynos4(struct s3c_gpio_chip *chip,
			     unsigned int off, s3c_gpio_pull_t pull)
{
	if (pull == S3C_GPIO_PULL_UP)
		pull = 3;

	return s3c_gpio_setpull_updown(chip, off, pull);
}

s3c_gpio_pull_t s3c_gpio_getpull_exynos4(struct s3c_gpio_chip *chip,
					 unsigned int off)
{
	s3c_gpio_pull_t pull;

	pull = s3c_gpio_getpull_updown(chip, off);
	if (pull == 3)
		pull = S3C_GPIO_PULL_UP;

	return pull;
}

static struct s3c_gpio_cfg gpio_cfg = {
	.set_config = s3c_gpio_setcfg_s3c64xx_4bit,
	.set_pull = s3c_gpio_setpull_updown,
	.get_pull = s3c_gpio_getpull_updown,
	.set_pull = s3c_gpio_setpull_exynos4,
	.get_pull = s3c_gpio_getpull_exynos4,
};

static struct s3c_gpio_cfg gpio_cfg_noint = {
	.set_config = s3c_gpio_setcfg_s3c64xx_4bit,
	.set_pull = s3c_gpio_setpull_updown,
	.get_pull = s3c_gpio_getpull_updown,
	.set_pull = s3c_gpio_setpull_exynos4,
	.get_pull = s3c_gpio_getpull_exynos4,
};

/*
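The wrappers above exist because of a register-encoding mismatch: the generic Samsung helpers pass S3C_GPIO_PULL_UP straight into the PUD field, but on EXYNOS4 the pull-up encoding in that field is 3. A hedged standalone sketch of that mapping follows; the 0x2 value for the generic pull-up constant is an assumption taken from plat/gpio-cfg.h, and the helper names are made up.

#include <stdio.h>

/* Sketch, not driver code: translate between the generic pull constant and
 * the EXYNOS4 PUD register encoding, mirroring the two wrappers above. */
enum { PULL_NONE = 0x0, PULL_DOWN = 0x1, PULL_UP = 0x2 };	/* generic view (assumed) */

static unsigned int to_exynos4_pud(unsigned int pull)
{
	return (pull == PULL_UP) ? 0x3 : pull;	/* 3 = pull-up in the register */
}

static unsigned int from_exynos4_pud(unsigned int pud)
{
	return (pud == 0x3) ? PULL_UP : pud;
}

int main(void)
{
	unsigned int reg = to_exynos4_pud(PULL_UP);

	printf("register value %u maps back to %u\n", reg, from_exynos4_pud(reg));
	return 0;
}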
@@ -432,7 +432,6 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
{
	void __iomem *base = bank->base;
	u32 gpio_bit = 1 << gpio;
	u32 val;

	if (cpu_is_omap44xx()) {
		MOD_REG_BIT(OMAP4_GPIO_LEVELDETECT0, gpio_bit,

@@ -455,15 +454,8 @@ static inline void set_24xx_gpio_triggering(struct gpio_bank *bank, int gpio,
	}
	if (likely(!(bank->non_wakeup_gpios & gpio_bit))) {
		if (cpu_is_omap44xx()) {
			if (trigger != 0)
				__raw_writel(1 << gpio, bank->base+
						OMAP4_GPIO_IRQWAKEN0);
			else {
				val = __raw_readl(bank->base +
						OMAP4_GPIO_IRQWAKEN0);
				__raw_writel(val & (~(1 << gpio)), bank->base +
						OMAP4_GPIO_IRQWAKEN0);
			}
			MOD_REG_BIT(OMAP4_GPIO_IRQWAKEN0, gpio_bit,
				trigger != 0);
		} else {
			/*
			 * GPIO wakeup request can only be generated on edge

@@ -1134,8 +1126,11 @@ static void gpio_irq_shutdown(struct irq_data *d)
{
	unsigned int gpio = d->irq - IH_GPIO_BASE;
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_reset_gpio(bank, gpio);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static void gpio_ack_irq(struct irq_data *d)

@@ -1150,9 +1145,12 @@ static void gpio_mask_irq(struct irq_data *d)
{
	unsigned int gpio = d->irq - IH_GPIO_BASE;
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_set_gpio_irqenable(bank, gpio, 0);
	_set_gpio_triggering(bank, get_gpio_index(gpio), IRQ_TYPE_NONE);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static void gpio_unmask_irq(struct irq_data *d)

@@ -1161,7 +1159,9 @@ static void gpio_unmask_irq(struct irq_data *d)
	struct gpio_bank *bank = irq_data_get_irq_chip_data(d);
	unsigned int irq_mask = 1 << get_gpio_index(gpio);
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	if (trigger)
		_set_gpio_triggering(bank, get_gpio_index(gpio), trigger);

@@ -1173,6 +1173,7 @@ static void gpio_unmask_irq(struct irq_data *d)
	}

	_set_gpio_irqenable(bank, gpio, 1);
	spin_unlock_irqrestore(&bank->lock, flags);
}

static struct irq_chip gpio_irq_chip = {

@@ -1524,7 +1525,7 @@ static void omap_gpio_mod_init(struct gpio_bank *bank, int id)
	}
}

static void __init omap_gpio_chip_init(struct gpio_bank *bank)
static void __devinit omap_gpio_chip_init(struct gpio_bank *bank)
{
	int j;
	static int gpio;
@@ -776,7 +776,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
	seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
	seq_printf(m, " seqno: 0x%08x\n", error->seqno);

	for (i = 0; i < 16; i++)
	for (i = 0; i < dev_priv->num_fence_regs; i++)
		seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);

	if (error->active_bo)
@@ -716,6 +716,7 @@ typedef struct drm_i915_private {
	struct intel_fbdev *fbdev;

	struct drm_property *broadcast_rgb_property;
	struct drm_property *force_audio_property;

	atomic_t forcewake_count;
} drm_i915_private_t;

@@ -909,13 +910,6 @@ struct drm_i915_file_private {
	} mm;
};

enum intel_chip_family {
	CHIP_I8XX = 0x01,
	CHIP_I9XX = 0x02,
	CHIP_I915 = 0x04,
	CHIP_I965 = 0x08,
};

#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)

#define IS_I830(dev) ((dev)->pci_device == 0x3577)
@@ -354,7 +354,7 @@ i915_gem_shmem_pread_fast(struct drm_device *dev,
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset & (PAGE_SIZE-1);
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

@@ -453,9 +453,9 @@ i915_gem_shmem_pread_slow(struct drm_device *dev,
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset & ~PAGE_MASK;
		shmem_page_offset = offset_in_page(offset);
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;
		data_page_offset = offset_in_page(data_ptr);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)

@@ -638,8 +638,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_base = (offset & ~(PAGE_SIZE-1));
		page_offset = offset & (PAGE_SIZE-1);
		page_base = offset & PAGE_MASK;
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

@@ -650,7 +650,6 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
		 */
		if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
				    page_offset, user_data, page_length))

			return -EFAULT;

		remain -= page_length;

@@ -730,9 +729,9 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev,
		 * page_length = bytes to copy for this page
		 */
		gtt_page_base = offset & PAGE_MASK;
		gtt_page_offset = offset & ~PAGE_MASK;
		gtt_page_offset = offset_in_page(offset);
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;
		data_page_offset = offset_in_page(data_ptr);

		page_length = remain;
		if ((gtt_page_offset + page_length) > PAGE_SIZE)

@@ -791,7 +790,7 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		page_offset = offset & (PAGE_SIZE-1);
		page_offset = offset_in_page(offset);
		page_length = remain;
		if ((page_offset + remain) > PAGE_SIZE)
			page_length = PAGE_SIZE - page_offset;

@@ -896,9 +895,9 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev,
		 * data_page_offset = offset with data_page_index page.
		 * page_length = bytes to copy for this page
		 */
		shmem_page_offset = offset & ~PAGE_MASK;
		shmem_page_offset = offset_in_page(offset);
		data_page_index = data_ptr / PAGE_SIZE - first_data_page;
		data_page_offset = data_ptr & ~PAGE_MASK;
		data_page_offset = offset_in_page(data_ptr);

		page_length = remain;
		if ((shmem_page_offset + page_length) > PAGE_SIZE)

@@ -1450,8 +1449,9 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
	 * edge of an even tile row (where tile rows are counted as if the bo is
	 * placed in a fenced gtt region).
	 */
	if (IS_GEN2(dev) ||
	    (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
	if (IS_GEN2(dev))
		tile_height = 16;
	else if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
		tile_height = 32;
	else
		tile_height = 8;
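All of the pread/pwrite changes above are mechanical substitutions: offset_in_page() masks an address with ~PAGE_MASK, which for a power-of-two page size gives the same value as offset & (PAGE_SIZE-1). A small standalone sketch, with PAGE_SIZE fixed to 4K purely for illustration and the macro reproduced locally rather than taken from kernel headers:

#include <stdio.h>

/* Sketch: offset_in_page() is assumed to be (addr & ~PAGE_MASK), which for a
 * power-of-two page size equals addr & (PAGE_SIZE - 1); the conversions above
 * therefore change spelling, not behaviour. */
#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define offset_in_page(p)	((unsigned long)(p) & ~PAGE_MASK)

int main(void)
{
	unsigned long offset = 0x12345;

	printf("offset_in_page      = 0x%lx\n", offset_in_page(offset));
	printf("offset & (SIZE - 1) = 0x%lx\n", offset & (PAGE_SIZE - 1));
	return 0;
}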
@@ -517,7 +517,7 @@ irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
	if (de_iir & DE_PIPEA_VBLANK_IVB)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK_IVB);
	if (de_iir & DE_PIPEB_VBLANK_IVB)
		drm_handle_vblank(dev, 1);

	/* check event from PCH */
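The one-character fix above removes a classic bug: a semicolon directly after the if-condition is an empty statement, so the indented call below it runs unconditionally. A hedged standalone illustration, with a made-up bit value and handler:

#include <stdio.h>

#define PIPEB_VBLANK_BIT	(1u << 5)	/* illustrative bit, not the real register layout */

static void handle_vblank(int pipe)
{
	printf("vblank handled on pipe %d\n", pipe);
}

int main(void)
{
	unsigned int status = 0;	/* pipe B vblank NOT pending */

	if (status & PIPEB_VBLANK_BIT);	/* buggy: ';' ends the if, the call below is unconditional */
		handle_vblank(1);

	if (status & PIPEB_VBLANK_BIT)	/* fixed: the call only runs when the bit is set */
		handle_vblank(1);

	return 0;
}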
@@ -288,6 +288,8 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
	 * This may be a DVI-I connector with a shared DDC
	 * link between analog and digital outputs, so we
	 * have to check the EDID input spec of the attached device.
	 *
	 * On the other hand, what should we do if it is a broken EDID?
	 */
	if (edid != NULL) {
		is_digital = edid->input & DRM_EDID_INPUT_DIGITAL;

@@ -298,6 +300,8 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
		if (!is_digital) {
			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
			return true;
		} else {
			DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
		}
	}
@ -3983,54 +3983,6 @@ static void i830_update_wm(struct drm_device *dev)
|
|||
#define ILK_LP0_PLANE_LATENCY 700
|
||||
#define ILK_LP0_CURSOR_LATENCY 1300
|
||||
|
||||
static bool ironlake_compute_wm0(struct drm_device *dev,
|
||||
int pipe,
|
||||
const struct intel_watermark_params *display,
|
||||
int display_latency_ns,
|
||||
const struct intel_watermark_params *cursor,
|
||||
int cursor_latency_ns,
|
||||
int *plane_wm,
|
||||
int *cursor_wm)
|
||||
{
|
||||
struct drm_crtc *crtc;
|
||||
int htotal, hdisplay, clock, pixel_size;
|
||||
int line_time_us, line_count;
|
||||
int entries, tlb_miss;
|
||||
|
||||
crtc = intel_get_crtc_for_pipe(dev, pipe);
|
||||
if (crtc->fb == NULL || !crtc->enabled)
|
||||
return false;
|
||||
|
||||
htotal = crtc->mode.htotal;
|
||||
hdisplay = crtc->mode.hdisplay;
|
||||
clock = crtc->mode.clock;
|
||||
pixel_size = crtc->fb->bits_per_pixel / 8;
|
||||
|
||||
/* Use the small buffer method to calculate plane watermark */
|
||||
entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000;
|
||||
tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8;
|
||||
if (tlb_miss > 0)
|
||||
entries += tlb_miss;
|
||||
entries = DIV_ROUND_UP(entries, display->cacheline_size);
|
||||
*plane_wm = entries + display->guard_size;
|
||||
if (*plane_wm > (int)display->max_wm)
|
||||
*plane_wm = display->max_wm;
|
||||
|
||||
/* Use the large buffer method to calculate cursor watermark */
|
||||
line_time_us = ((htotal * 1000) / clock);
|
||||
line_count = (cursor_latency_ns / line_time_us + 1000) / 1000;
|
||||
entries = line_count * 64 * pixel_size;
|
||||
tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8;
|
||||
if (tlb_miss > 0)
|
||||
entries += tlb_miss;
|
||||
entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
|
||||
*cursor_wm = entries + cursor->guard_size;
|
||||
if (*cursor_wm > (int)cursor->max_wm)
|
||||
*cursor_wm = (int)cursor->max_wm;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check the wm result.
|
||||
*
|
||||
|
@ -4139,12 +4091,12 @@ static void ironlake_update_wm(struct drm_device *dev)
|
|||
unsigned int enabled;
|
||||
|
||||
enabled = 0;
|
||||
if (ironlake_compute_wm0(dev, 0,
|
||||
&ironlake_display_wm_info,
|
||||
ILK_LP0_PLANE_LATENCY,
|
||||
&ironlake_cursor_wm_info,
|
||||
ILK_LP0_CURSOR_LATENCY,
|
||||
&plane_wm, &cursor_wm)) {
|
||||
if (g4x_compute_wm0(dev, 0,
|
||||
&ironlake_display_wm_info,
|
||||
ILK_LP0_PLANE_LATENCY,
|
||||
&ironlake_cursor_wm_info,
|
||||
ILK_LP0_CURSOR_LATENCY,
|
||||
&plane_wm, &cursor_wm)) {
|
||||
I915_WRITE(WM0_PIPEA_ILK,
|
||||
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
|
||||
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
|
||||
|
@ -4153,12 +4105,12 @@ static void ironlake_update_wm(struct drm_device *dev)
|
|||
enabled |= 1;
|
||||
}
|
||||
|
||||
if (ironlake_compute_wm0(dev, 1,
|
||||
&ironlake_display_wm_info,
|
||||
ILK_LP0_PLANE_LATENCY,
|
||||
&ironlake_cursor_wm_info,
|
||||
ILK_LP0_CURSOR_LATENCY,
|
||||
&plane_wm, &cursor_wm)) {
|
||||
if (g4x_compute_wm0(dev, 1,
|
||||
&ironlake_display_wm_info,
|
||||
ILK_LP0_PLANE_LATENCY,
|
||||
&ironlake_cursor_wm_info,
|
||||
ILK_LP0_CURSOR_LATENCY,
|
||||
&plane_wm, &cursor_wm)) {
|
||||
I915_WRITE(WM0_PIPEB_ILK,
|
||||
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
|
||||
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
|
||||
|
@ -4223,10 +4175,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
|
|||
unsigned int enabled;
|
||||
|
||||
enabled = 0;
|
||||
if (ironlake_compute_wm0(dev, 0,
|
||||
&sandybridge_display_wm_info, latency,
|
||||
&sandybridge_cursor_wm_info, latency,
|
||||
&plane_wm, &cursor_wm)) {
|
||||
if (g4x_compute_wm0(dev, 0,
|
||||
&sandybridge_display_wm_info, latency,
|
||||
&sandybridge_cursor_wm_info, latency,
|
||||
&plane_wm, &cursor_wm)) {
|
||||
I915_WRITE(WM0_PIPEA_ILK,
|
||||
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
|
||||
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
|
||||
|
@ -4235,10 +4187,10 @@ static void sandybridge_update_wm(struct drm_device *dev)
|
|||
enabled |= 1;
|
||||
}
|
||||
|
||||
if (ironlake_compute_wm0(dev, 1,
|
||||
&sandybridge_display_wm_info, latency,
|
||||
&sandybridge_cursor_wm_info, latency,
|
||||
&plane_wm, &cursor_wm)) {
|
||||
if (g4x_compute_wm0(dev, 1,
|
||||
&sandybridge_display_wm_info, latency,
|
||||
&sandybridge_cursor_wm_info, latency,
|
||||
&plane_wm, &cursor_wm)) {
|
||||
I915_WRITE(WM0_PIPEB_ILK,
|
||||
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
|
||||
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
|
||||
|
@ -7675,6 +7627,7 @@ static void intel_init_display(struct drm_device *dev)
|
|||
dev_priv->display.update_wm = NULL;
|
||||
} else
|
||||
dev_priv->display.update_wm = pineview_update_wm;
|
||||
dev_priv->display.init_clock_gating = gen3_init_clock_gating;
|
||||
} else if (IS_G4X(dev)) {
|
||||
dev_priv->display.update_wm = g4x_update_wm;
|
||||
dev_priv->display.init_clock_gating = g4x_init_clock_gating;
|
||||
|
|
|
@ -59,8 +59,6 @@ struct intel_dp {
|
|||
bool is_pch_edp;
|
||||
uint8_t train_set[4];
|
||||
uint8_t link_status[DP_LINK_STATUS_SIZE];
|
||||
|
||||
struct drm_property *force_audio_property;
|
||||
};
|
||||
|
||||
/**
|
||||
|
@ -1702,7 +1700,7 @@ intel_dp_set_property(struct drm_connector *connector,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (property == intel_dp->force_audio_property) {
|
||||
if (property == dev_priv->force_audio_property) {
|
||||
int i = val;
|
||||
bool has_audio;
|
||||
|
||||
|
@ -1841,16 +1839,7 @@ bool intel_dpd_is_edp(struct drm_device *dev)
|
|||
static void
|
||||
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
|
||||
intel_dp->force_audio_property =
|
||||
drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
|
||||
if (intel_dp->force_audio_property) {
|
||||
intel_dp->force_audio_property->values[0] = -1;
|
||||
intel_dp->force_audio_property->values[1] = 1;
|
||||
drm_connector_attach_property(connector, intel_dp->force_audio_property, 0);
|
||||
}
|
||||
|
||||
intel_attach_force_audio_property(connector);
|
||||
intel_attach_broadcast_rgb_property(connector);
|
||||
}
|
||||
|
||||
|
|
|
@ -236,6 +236,7 @@ struct intel_unpin_work {
|
|||
int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
|
||||
extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
|
||||
|
||||
extern void intel_attach_force_audio_property(struct drm_connector *connector);
|
||||
extern void intel_attach_broadcast_rgb_property(struct drm_connector *connector);
|
||||
|
||||
extern void intel_crt_init(struct drm_device *dev);
|
||||
|
|
|
@ -45,7 +45,6 @@ struct intel_hdmi {
|
|||
bool has_hdmi_sink;
|
||||
bool has_audio;
|
||||
int force_audio;
|
||||
struct drm_property *force_audio_property;
|
||||
};
|
||||
|
||||
static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
|
||||
|
@ -194,7 +193,7 @@ static int intel_hdmi_mode_valid(struct drm_connector *connector,
|
|||
if (mode->clock > 165000)
|
||||
return MODE_CLOCK_HIGH;
|
||||
if (mode->clock < 20000)
|
||||
return MODE_CLOCK_HIGH;
|
||||
return MODE_CLOCK_LOW;
|
||||
|
||||
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
|
||||
return MODE_NO_DBLESCAN;
|
||||
|
@ -287,7 +286,7 @@ intel_hdmi_set_property(struct drm_connector *connector,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (property == intel_hdmi->force_audio_property) {
|
||||
if (property == dev_priv->force_audio_property) {
|
||||
int i = val;
|
||||
bool has_audio;
|
||||
|
||||
|
@ -365,16 +364,7 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
|
|||
static void
|
||||
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
|
||||
{
|
||||
struct drm_device *dev = connector->dev;
|
||||
|
||||
intel_hdmi->force_audio_property =
|
||||
drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
|
||||
if (intel_hdmi->force_audio_property) {
|
||||
intel_hdmi->force_audio_property->values[0] = -1;
|
||||
intel_hdmi->force_audio_property->values[1] = 1;
|
||||
drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
|
||||
}
|
||||
|
||||
intel_attach_force_audio_property(connector);
|
||||
intel_attach_broadcast_rgb_property(connector);
|
||||
}
|
||||
|
||||
|
|
|
@@ -727,6 +727,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
			DMI_MATCH(DMI_PRODUCT_NAME, "U800"),
		},
	},
	{
		.callback = intel_no_lvds_dmi_callback,
		.ident = "Asus EeeBox PC EB1007",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "EB1007"),
		},
	},

	{ } /* terminating entry */
};
@@ -81,6 +81,36 @@ int intel_ddc_get_modes(struct drm_connector *connector,
	return ret;
}

static const char *force_audio_names[] = {
	"off",
	"auto",
	"on",
};

void
intel_attach_force_audio_property(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_property *prop;
	int i;

	prop = dev_priv->force_audio_property;
	if (prop == NULL) {
		prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
					   "audio",
					   ARRAY_SIZE(force_audio_names));
		if (prop == NULL)
			return;

		for (i = 0; i < ARRAY_SIZE(force_audio_names); i++)
			drm_property_add_enum(prop, i, i-1, force_audio_names[i]);

		dev_priv->force_audio_property = prop;
	}
	drm_connector_attach_property(connector, prop, 0);
}

static const char *broadcast_rgb_names[] = {
	"Full",
	"Limited 16:235",
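One detail of the shared property above worth spelling out: drm_property_add_enum() is called with value i-1, so the enum index maps onto the same -1/0/+1 range the old per-connector force_audio range properties used. A tiny standalone sketch of that mapping, not driver code:

#include <stdio.h>

/* Sketch: enum index i is exposed with value i - 1,
 * giving "off" = -1, "auto" = 0, "on" = +1. */
static const char *force_audio_names[] = { "off", "auto", "on" };

int main(void)
{
	int i;

	for (i = 0; i < 3; i++)
		printf("%-4s -> %d\n", force_audio_names[i], i - 1);
	return 0;
}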
@ -148,8 +148,6 @@ struct intel_sdvo_connector {
|
|||
int format_supported_num;
|
||||
struct drm_property *tv_format;
|
||||
|
||||
struct drm_property *force_audio_property;
|
||||
|
||||
/* add the property for the SDVO-TV */
|
||||
struct drm_property *left;
|
||||
struct drm_property *right;
|
||||
|
@ -1712,7 +1710,7 @@ intel_sdvo_set_property(struct drm_connector *connector,
|
|||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (property == intel_sdvo_connector->force_audio_property) {
|
||||
if (property == dev_priv->force_audio_property) {
|
||||
int i = val;
|
||||
bool has_audio;
|
||||
|
||||
|
@ -2037,15 +2035,7 @@ intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
|
|||
{
|
||||
struct drm_device *dev = connector->base.base.dev;
|
||||
|
||||
connector->force_audio_property =
|
||||
drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
|
||||
if (connector->force_audio_property) {
|
||||
connector->force_audio_property->values[0] = -1;
|
||||
connector->force_audio_property->values[1] = 1;
|
||||
drm_connector_attach_property(&connector->base.base,
|
||||
connector->force_audio_property, 0);
|
||||
}
|
||||
|
||||
intel_attach_force_audio_property(&connector->base.base);
|
||||
if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
|
||||
intel_attach_broadcast_rgb_property(&connector->base.base);
|
||||
}
|
||||
|
|
|
@ -900,6 +900,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
|
|||
}
|
||||
/* NV11 and NV20 don't have this, they stop at 0x52. */
|
||||
if (nv_gf4_disp_arch(dev)) {
|
||||
rd_cio_state(dev, head, regp, NV_CIO_CRE_42);
|
||||
rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
|
||||
rd_cio_state(dev, head, regp, NV_CIO_CRE_54);
|
||||
|
||||
|
@ -1003,6 +1004,7 @@ nv_load_state_ext(struct drm_device *dev, int head,
|
|||
nouveau_wait_eq(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
|
||||
}
|
||||
|
||||
wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
|
||||
wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
|
||||
wr_cio_state(dev, head, regp, NV_CIO_CRE_54);
|
||||
|
||||
|
|
|
@ -397,7 +397,7 @@ nouveau_mem_vram_init(struct drm_device *dev)
|
|||
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
|
||||
dma_bits = 40;
|
||||
} else
|
||||
if (drm_pci_device_is_pcie(dev) &&
|
||||
if (0 && drm_pci_device_is_pcie(dev) &&
|
||||
dev_priv->chipset > 0x40 &&
|
||||
dev_priv->chipset != 0x45) {
|
||||
if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
|
||||
|
@ -868,7 +868,9 @@ nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
|
|||
nouveau_vm_unmap(&node->tmp_vma);
|
||||
nouveau_vm_put(&node->tmp_vma);
|
||||
}
|
||||
|
||||
mem->mm_node = NULL;
|
||||
kfree(node);
|
||||
}
|
||||
|
||||
static int
|
||||
|
|
|
@ -458,7 +458,7 @@ nouveau_sgdma_init(struct drm_device *dev)
|
|||
dev_priv->gart_info.type = NOUVEAU_GART_HW;
|
||||
dev_priv->gart_info.func = &nv50_sgdma_backend;
|
||||
} else
|
||||
if (drm_pci_device_is_pcie(dev) &&
|
||||
if (0 && drm_pci_device_is_pcie(dev) &&
|
||||
dev_priv->chipset > 0x40 && dev_priv->chipset != 0x45) {
|
||||
if (nv44_graph_class(dev)) {
|
||||
dev_priv->gart_info.func = &nv44_sgdma_backend;
|
||||
|
|
|
@ -371,6 +371,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
|
|||
engine->vram.flags_valid = nv50_vram_flags_valid;
|
||||
break;
|
||||
case 0xC0:
|
||||
case 0xD0:
|
||||
engine->instmem.init = nvc0_instmem_init;
|
||||
engine->instmem.takedown = nvc0_instmem_takedown;
|
||||
engine->instmem.suspend = nvc0_instmem_suspend;
|
||||
|
@ -563,68 +564,68 @@ nouveau_card_init(struct drm_device *dev)
|
|||
if (ret)
|
||||
goto out_timer;
|
||||
|
||||
switch (dev_priv->card_type) {
|
||||
case NV_04:
|
||||
nv04_graph_create(dev);
|
||||
break;
|
||||
case NV_10:
|
||||
nv10_graph_create(dev);
|
||||
break;
|
||||
case NV_20:
|
||||
case NV_30:
|
||||
nv20_graph_create(dev);
|
||||
break;
|
||||
case NV_40:
|
||||
nv40_graph_create(dev);
|
||||
break;
|
||||
case NV_50:
|
||||
nv50_graph_create(dev);
|
||||
break;
|
||||
case NV_C0:
|
||||
nvc0_graph_create(dev);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
switch (dev_priv->chipset) {
|
||||
case 0x84:
|
||||
case 0x86:
|
||||
case 0x92:
|
||||
case 0x94:
|
||||
case 0x96:
|
||||
case 0xa0:
|
||||
nv84_crypt_create(dev);
|
||||
break;
|
||||
}
|
||||
|
||||
switch (dev_priv->card_type) {
|
||||
case NV_50:
|
||||
switch (dev_priv->chipset) {
|
||||
case 0xa3:
|
||||
case 0xa5:
|
||||
case 0xa8:
|
||||
case 0xaf:
|
||||
nva3_copy_create(dev);
|
||||
if (!nouveau_noaccel) {
|
||||
switch (dev_priv->card_type) {
|
||||
case NV_04:
|
||||
nv04_graph_create(dev);
|
||||
break;
|
||||
case NV_10:
|
||||
nv10_graph_create(dev);
|
||||
break;
|
||||
case NV_20:
|
||||
case NV_30:
|
||||
nv20_graph_create(dev);
|
||||
break;
|
||||
case NV_40:
|
||||
nv40_graph_create(dev);
|
||||
break;
|
||||
case NV_50:
|
||||
nv50_graph_create(dev);
|
||||
break;
|
||||
case NV_C0:
|
||||
nvc0_graph_create(dev);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case NV_C0:
|
||||
nvc0_copy_create(dev, 0);
|
||||
nvc0_copy_create(dev, 1);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (dev_priv->card_type == NV_40)
|
||||
nv40_mpeg_create(dev);
|
||||
else
|
||||
if (dev_priv->card_type == NV_50 &&
|
||||
(dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
|
||||
nv50_mpeg_create(dev);
|
||||
switch (dev_priv->chipset) {
|
||||
case 0x84:
|
||||
case 0x86:
|
||||
case 0x92:
|
||||
case 0x94:
|
||||
case 0x96:
|
||||
case 0xa0:
|
||||
nv84_crypt_create(dev);
|
||||
break;
|
||||
}
|
||||
|
||||
switch (dev_priv->card_type) {
|
||||
case NV_50:
|
||||
switch (dev_priv->chipset) {
|
||||
case 0xa3:
|
||||
case 0xa5:
|
||||
case 0xa8:
|
||||
case 0xaf:
|
||||
nva3_copy_create(dev);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
case NV_C0:
|
||||
nvc0_copy_create(dev, 0);
|
||||
nvc0_copy_create(dev, 1);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
if (dev_priv->card_type == NV_40)
|
||||
nv40_mpeg_create(dev);
|
||||
else
|
||||
if (dev_priv->card_type == NV_50 &&
|
||||
(dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
|
||||
nv50_mpeg_create(dev);
|
||||
|
||||
if (!nouveau_noaccel) {
|
||||
for (e = 0; e < NVOBJ_ENGINE_NR; e++) {
|
||||
if (dev_priv->eng[e]) {
|
||||
ret = dev_priv->eng[e]->init(dev, e);
|
||||
|
@ -922,6 +923,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
|
|||
dev_priv->card_type = NV_50;
|
||||
break;
|
||||
case 0xc0:
|
||||
case 0xd0:
|
||||
dev_priv->card_type = NV_C0;
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -58,6 +58,7 @@ nouveau_vm_map_at(struct nouveau_vma *vma, u64 delta, struct nouveau_mem *node)
|
|||
num -= len;
|
||||
pte += len;
|
||||
if (unlikely(end >= max)) {
|
||||
phys += len << (bits + 12);
|
||||
pde++;
|
||||
pte = 0;
|
||||
}
|
||||
|
|
|
@ -376,7 +376,10 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
|
|||
*/
|
||||
|
||||
/* framebuffer can be larger than crtc scanout area. */
|
||||
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] = XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
|
||||
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
|
||||
XLATE(fb->pitch / 8, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
|
||||
regp->CRTC[NV_CIO_CRE_42] =
|
||||
XLATE(fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
|
||||
regp->CRTC[NV_CIO_CRE_RPC1_INDEX] = mode->crtc_hdisplay < 1280 ?
|
||||
MASK(NV_CIO_CRE_RPC1_LARGE) : 0x00;
|
||||
regp->CRTC[NV_CIO_CRE_LSR_INDEX] = XLATE(horizBlankEnd, 6, NV_CIO_CRE_LSR_HBE_6) |
|
||||
|
@ -824,8 +827,11 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
|
|||
regp->CRTC[NV_CIO_CR_OFFSET_INDEX] = drm_fb->pitch >> 3;
|
||||
regp->CRTC[NV_CIO_CRE_RPC0_INDEX] =
|
||||
XLATE(drm_fb->pitch >> 3, 8, NV_CIO_CRE_RPC0_OFFSET_10_8);
|
||||
regp->CRTC[NV_CIO_CRE_42] =
|
||||
XLATE(drm_fb->pitch / 8, 11, NV_CIO_CRE_42_OFFSET_11);
|
||||
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_RPC0_INDEX);
|
||||
crtc_wr_cio_state(crtc, regp, NV_CIO_CR_OFFSET_INDEX);
|
||||
crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_42);
|
||||
|
||||
/* Update the framebuffer location. */
|
||||
regp->fb_start = nv_crtc->fb.offset & ~3;
|
||||
|
|
|
@ -277,6 +277,8 @@
|
|||
# define NV_CIO_CRE_EBR_VDE_11 2:2
|
||||
# define NV_CIO_CRE_EBR_VRS_11 4:4
|
||||
# define NV_CIO_CRE_EBR_VBS_11 6:6
|
||||
# define NV_CIO_CRE_42 0x42
|
||||
# define NV_CIO_CRE_42_OFFSET_11 6:6
|
||||
# define NV_CIO_CRE_43 0x43
|
||||
# define NV_CIO_CRE_44 0x44 /* head control */
|
||||
# define NV_CIO_CRE_CSB 0x45 /* colour saturation boost */
|
||||
|
|
|
@@ -28,11 +28,4 @@ config DRM_RADEON_KMS
	  The kernel will also perform security check on command stream
	  provided by the user, we want to catch and forbid any illegal use
	  of the GPU such as DMA into random system memory or into memory
	  not owned by the process supplying the command stream. This part
	  of the code is still incomplete and this why we propose that patch
	  as a staging driver addition, future security might forbid current
	  experimental userspace to run.

	  This code support the following hardware : R1XX,R2XX,R3XX,R4XX,R5XX
	  (radeon up to X1950). Works is underway to provide support for R6XX,
	  R7XX and newer hardware (radeon from HD2XXX to HD4XXX).
	  not owned by the process supplying the command stream.
@ -1045,7 +1045,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
|
|||
uint64_t fb_location;
|
||||
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
|
||||
u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
|
||||
u32 tmp;
|
||||
u32 tmp, viewport_w, viewport_h;
|
||||
int r;
|
||||
|
||||
/* no fb bound */
|
||||
|
@ -1171,8 +1171,10 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
|
|||
y &= ~1;
|
||||
WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
|
||||
(x << 16) | y);
|
||||
viewport_w = crtc->mode.hdisplay;
|
||||
viewport_h = (crtc->mode.vdisplay + 1) & ~1;
|
||||
WREG32(EVERGREEN_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
|
||||
(crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
|
||||
(viewport_w << 16) | viewport_h);
|
||||
|
||||
/* pageflip setup */
|
||||
/* make sure flip is at vb rather than hb */
|
||||
|
@ -1213,7 +1215,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
|
|||
uint64_t fb_location;
|
||||
uint32_t fb_format, fb_pitch_pixels, tiling_flags;
|
||||
u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
|
||||
u32 tmp;
|
||||
u32 tmp, viewport_w, viewport_h;
|
||||
int r;
|
||||
|
||||
/* no fb bound */
|
||||
|
@ -1338,8 +1340,10 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
|
|||
y &= ~1;
|
||||
WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
|
||||
(x << 16) | y);
|
||||
viewport_w = crtc->mode.hdisplay;
|
||||
viewport_h = (crtc->mode.vdisplay + 1) & ~1;
|
||||
WREG32(AVIVO_D1MODE_VIEWPORT_SIZE + radeon_crtc->crtc_offset,
|
||||
(crtc->mode.hdisplay << 16) | crtc->mode.vdisplay);
|
||||
(viewport_w << 16) | viewport_h);
|
||||
|
||||
/* pageflip setup */
|
||||
/* make sure flip is at vb rather than hb */
|
||||
|
|
|
@ -39,17 +39,335 @@
|
|||
|
||||
const u32 cayman_default_state[] =
|
||||
{
|
||||
/* XXX fill in additional blit state */
|
||||
0xc0066900,
|
||||
0x00000000,
|
||||
0x00000060, /* DB_RENDER_CONTROL */
|
||||
0x00000000, /* DB_COUNT_CONTROL */
|
||||
0x00000000, /* DB_DEPTH_VIEW */
|
||||
0x0000002a, /* DB_RENDER_OVERRIDE */
|
||||
0x00000000, /* DB_RENDER_OVERRIDE2 */
|
||||
0x00000000, /* DB_HTILE_DATA_BASE */
|
||||
|
||||
0xc0026900,
|
||||
0x00000316,
|
||||
0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
|
||||
0x00000010, /* */
|
||||
0x0000000a,
|
||||
0x00000000, /* DB_STENCIL_CLEAR */
|
||||
0x00000000, /* DB_DEPTH_CLEAR */
|
||||
|
||||
0xc0036900,
|
||||
0x0000000f,
|
||||
0x00000000, /* DB_DEPTH_INFO */
|
||||
0x00000000, /* DB_Z_INFO */
|
||||
0x00000000, /* DB_STENCIL_INFO */
|
||||
|
||||
0xc0016900,
|
||||
0x00000080,
|
||||
0x00000000, /* PA_SC_WINDOW_OFFSET */
|
||||
|
||||
0xc00d6900,
|
||||
0x00000083,
|
||||
0x0000ffff, /* PA_SC_CLIPRECT_RULE */
|
||||
0x00000000, /* PA_SC_CLIPRECT_0_TL */
|
||||
0x20002000, /* PA_SC_CLIPRECT_0_BR */
|
||||
0x00000000,
|
||||
0x20002000,
|
||||
0x00000000,
|
||||
0x20002000,
|
||||
0x00000000,
|
||||
0x20002000,
|
||||
0xaaaaaaaa, /* PA_SC_EDGERULE */
|
||||
0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
|
||||
0x0000000f, /* CB_TARGET_MASK */
|
||||
0x0000000f, /* CB_SHADER_MASK */
|
||||
|
||||
0xc0226900,
|
||||
0x00000094,
|
||||
0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
|
||||
0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x80000000,
|
||||
0x20002000,
|
||||
0x00000000, /* PA_SC_VPORT_ZMIN_0 */
|
||||
0x3f800000, /* PA_SC_VPORT_ZMAX_0 */
|
||||
|
||||
0xc0016900,
|
||||
0x000000d4,
|
||||
0x00000000, /* SX_MISC */
|
||||
|
||||
0xc0026900,
|
||||
0x000000d9,
|
||||
0x00000000, /* CP_RINGID */
|
||||
0x00000000, /* CP_VMID */
|
||||
|
||||
0xc0096900,
|
||||
0x00000100,
|
||||
0x00ffffff, /* VGT_MAX_VTX_INDX */
|
||||
0x00000000, /* VGT_MIN_VTX_INDX */
|
||||
0x00000000, /* VGT_INDX_OFFSET */
|
||||
0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */
|
||||
0x00000000, /* SX_ALPHA_TEST_CONTROL */
|
||||
0x00000000, /* CB_BLEND_RED */
|
||||
0x00000000, /* CB_BLEND_GREEN */
|
||||
0x00000000, /* CB_BLEND_BLUE */
|
||||
0x00000000, /* CB_BLEND_ALPHA */
|
||||
|
||||
0xc0016900,
|
||||
0x00000187,
|
||||
0x00000100, /* SPI_VS_OUT_ID_0 */
|
||||
|
||||
0xc0026900,
|
||||
0x00000191,
|
||||
0x00000100, /* SPI_PS_INPUT_CNTL_0 */
|
||||
0x00000101, /* SPI_PS_INPUT_CNTL_1 */
|
||||
|
||||
0xc0016900,
|
||||
0x000001b1,
|
||||
0x00000000, /* SPI_VS_OUT_CONFIG */
|
||||
|
||||
0xc0106900,
|
||||
0x000001b3,
|
||||
0x20000001, /* SPI_PS_IN_CONTROL_0 */
|
||||
0x00000000, /* SPI_PS_IN_CONTROL_1 */
|
||||
0x00000000, /* SPI_INTERP_CONTROL_0 */
|
||||
0x00000000, /* SPI_INPUT_Z */
|
||||
0x00000000, /* SPI_FOG_CNTL */
|
||||
0x00100000, /* SPI_BARYC_CNTL */
|
||||
0x00000000, /* SPI_PS_IN_CONTROL_2 */
|
||||
0x00000000, /* SPI_COMPUTE_INPUT_CNTL */
|
||||
0x00000000, /* SPI_COMPUTE_NUM_THREAD_X */
|
||||
0x00000000, /* SPI_COMPUTE_NUM_THREAD_Y */
|
||||
0x00000000, /* SPI_COMPUTE_NUM_THREAD_Z */
|
||||
0x00000000, /* SPI_GPR_MGMT */
|
||||
0x00000000, /* SPI_LDS_MGMT */
|
||||
0x00000000, /* SPI_STACK_MGMT */
|
||||
0x00000000, /* SPI_WAVE_MGMT_1 */
|
||||
0x00000000, /* SPI_WAVE_MGMT_2 */
|
||||
|
||||
0xc0016900,
|
||||
0x000001e0,
|
||||
0x00000000, /* CB_BLEND0_CONTROL */
|
||||
|
||||
0xc00e6900,
|
||||
0x00000200,
|
||||
0x00000000, /* DB_DEPTH_CONTROL */
|
||||
0x00000000, /* DB_EQAA */
|
||||
0x00cc0010, /* CB_COLOR_CONTROL */
|
||||
0x00000210, /* DB_SHADER_CONTROL */
|
||||
0x00010000, /* PA_CL_CLIP_CNTL */
|
||||
0x00000004, /* PA_SU_SC_MODE_CNTL */
|
||||
0x00000100, /* PA_CL_VTE_CNTL */
|
||||
0x00000000, /* PA_CL_VS_OUT_CNTL */
|
||||
0x00000000, /* PA_CL_NANINF_CNTL */
|
||||
0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
|
||||
0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
|
||||
0x00000000, /* PA_SU_PRIM_FILTER_CNTL */
|
||||
0x00000000, /* */
|
||||
0x00000000, /* */
|
||||
|
||||
0xc0026900,
|
||||
0x00000229,
|
||||
0x00000000, /* SQ_PGM_START_FS */
|
||||
0x00000000,
|
||||
|
||||
0xc0016900,
|
||||
0x0000023b,
|
||||
0x00000000, /* SQ_LDS_ALLOC_PS */
|
||||
|
||||
0xc0066900,
|
||||
0x00000240,
|
||||
0x00000000, /* SQ_ESGS_RING_ITEMSIZE */
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
||||
0xc0046900,
|
||||
0x00000247,
|
||||
0x00000000, /* SQ_GS_VERT_ITEMSIZE */
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
||||
0xc0116900,
|
||||
0x00000280,
|
||||
0x00000000, /* PA_SU_POINT_SIZE */
|
||||
0x00000000, /* PA_SU_POINT_MINMAX */
|
||||
0x00000008, /* PA_SU_LINE_CNTL */
|
||||
0x00000000, /* PA_SC_LINE_STIPPLE */
|
||||
0x00000000, /* VGT_OUTPUT_PATH_CNTL */
|
||||
0x00000000, /* VGT_HOS_CNTL */
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000, /* VGT_GS_MODE */
|
||||
|
||||
0xc0026900,
|
||||
0x00000292,
|
||||
0x00000000, /* PA_SC_MODE_CNTL_0 */
|
||||
0x00000000, /* PA_SC_MODE_CNTL_1 */
|
||||
|
||||
0xc0016900,
|
||||
0x000002a1,
|
||||
0x00000000, /* VGT_PRIMITIVEID_EN */
|
||||
|
||||
0xc0016900,
|
||||
0x000002a5,
|
||||
0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */
|
||||
|
||||
0xc0026900,
|
||||
0x000002a8,
|
||||
0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
|
||||
0x00000000,
|
||||
|
||||
0xc0026900,
|
||||
0x000002ad,
|
||||
0x00000000, /* VGT_REUSE_OFF */
|
||||
0x00000000,
|
||||
|
||||
0xc0016900,
|
||||
0x000002d5,
|
||||
0x00000000, /* VGT_SHADER_STAGES_EN */
|
||||
|
||||
0xc0016900,
|
||||
0x000002dc,
|
||||
0x0000aa00, /* DB_ALPHA_TO_MASK */
|
||||
|
||||
0xc0066900,
|
||||
0x000002de,
|
||||
0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
|
||||
0xc0026900,
|
||||
0x000002e5,
|
||||
0x00000000, /* VGT_STRMOUT_CONFIG */
|
||||
0x00000000,
|
||||
|
||||
0xc01b6900,
|
||||
0x000002f5,
|
||||
0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
|
||||
0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
|
||||
0x00000000, /* PA_SC_LINE_CNTL */
|
||||
0x00000000, /* PA_SC_AA_CONFIG */
|
||||
0x00000005, /* PA_SU_VTX_CNTL */
|
||||
0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
|
||||
0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
|
||||
0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
|
||||
0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
|
||||
0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0x00000000,
|
||||
0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
|
||||
0xffffffff,
|
||||
|
||||
0xc0026900,
|
||||
0x00000316,
|
||||
0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
|
||||
0x00000010, /* */
|
||||
};
|
||||
|
||||
const u32 cayman_vs[] =
|
||||
{
|
||||
0x00000004,
|
||||
0x80400400,
|
||||
0x0000a03c,
|
||||
0x95000688,
|
||||
0x00004000,
|
||||
0x15000688,
|
||||
0x00000000,
|
||||
0x88000000,
|
||||
0x04000000,
|
||||
0x67961001,
|
||||
#ifdef __BIG_ENDIAN
|
||||
0x00020000,
|
||||
#else
|
||||
0x00000000,
|
||||
#endif
|
||||
0x00000000,
|
||||
0x04000000,
|
||||
0x67961000,
|
||||
#ifdef __BIG_ENDIAN
|
||||
0x00020008,
|
||||
#else
|
||||
0x00000008,
|
||||
#endif
|
||||
0x00000000,
|
||||
};
|
||||
|
||||
const u32 cayman_ps[] =
|
||||
{
|
||||
0x00000004,
|
||||
0xa00c0000,
|
||||
0x00000008,
|
||||
0x80400000,
|
||||
0x00000000,
|
||||
0x95000688,
|
||||
0x00000000,
|
||||
0x88000000,
|
||||
0x00380400,
|
||||
0x00146b10,
|
||||
0x00380000,
|
||||
0x20146b10,
|
||||
0x00380400,
|
||||
0x40146b00,
|
||||
0x80380000,
|
||||
0x60146b00,
|
||||
0x00000010,
|
||||
0x000d1000,
|
||||
0xb0800000,
|
||||
0x00000000,
|
||||
};
|
||||
|
||||
const u32 cayman_ps_size = ARRAY_SIZE(cayman_ps);
|
||||
const u32 cayman_vs_size = ARRAY_SIZE(cayman_vs);
|
||||
const u32 cayman_default_size = ARRAY_SIZE(cayman_default_state);
|
||||
|
|
|
@@ -25,8 +25,11 @@
#ifndef CAYMAN_BLIT_SHADERS_H
#define CAYMAN_BLIT_SHADERS_H

extern const u32 cayman_ps[];
extern const u32 cayman_vs[];
extern const u32 cayman_default_state[];

extern const u32 cayman_ps_size, cayman_vs_size;
extern const u32 cayman_default_size;

#endif
@@ -88,21 +88,39 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
/* get temperature in millidegrees */
int evergreen_get_temp(struct radeon_device *rdev)
{
	u32 temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
		ASIC_T_SHIFT;
	u32 actual_temp = 0;
	u32 temp, toffset, actual_temp = 0;

	if (temp & 0x400)
		actual_temp = -256;
	else if (temp & 0x200)
		actual_temp = 255;
	else if (temp & 0x100) {
		actual_temp = temp & 0x1ff;
		actual_temp |= ~0x1ff;
	} else
		actual_temp = temp & 0xff;
	if (rdev->family == CHIP_JUNIPER) {
		toffset = (RREG32(CG_THERMAL_CTRL) & TOFFSET_MASK) >>
			TOFFSET_SHIFT;
		temp = (RREG32(CG_TS0_STATUS) & TS0_ADC_DOUT_MASK) >>
			TS0_ADC_DOUT_SHIFT;

		return (actual_temp * 1000) / 2;
		if (toffset & 0x100)
			actual_temp = temp / 2 - (0x200 - toffset);
		else
			actual_temp = temp / 2 + toffset;

		actual_temp = actual_temp * 1000;

	} else {
		temp = (RREG32(CG_MULT_THERMAL_STATUS) & ASIC_T_MASK) >>
			ASIC_T_SHIFT;

		if (temp & 0x400)
			actual_temp = -256;
		else if (temp & 0x200)
			actual_temp = 255;
		else if (temp & 0x100) {
			actual_temp = temp & 0x1ff;
			actual_temp |= ~0x1ff;
		} else
			actual_temp = temp & 0xff;

		actual_temp = (actual_temp * 1000) / 2;
	}

	return actual_temp;
}

int sumo_get_temp(struct radeon_device *rdev)
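The Juniper branch above reads a raw ADC value plus a 9-bit offset whose top bit (0x100) marks it as negative: the raw reading is halved, the offset applied as either +toffset or -(0x200 - toffset), and the result scaled to millidegrees. A standalone sketch of that arithmetic with made-up register values:

#include <stdio.h>

/* Sketch of the CHIP_JUNIPER path above, not driver code: toffset is treated
 * as a 9-bit field where bit 0x100 means the offset is negative. */
static int juniper_temp_millicelsius(unsigned int temp, unsigned int toffset)
{
	int actual;

	if (toffset & 0x100)
		actual = temp / 2 - (0x200 - toffset);
	else
		actual = temp / 2 + toffset;

	return actual * 1000;
}

int main(void)
{
	/* e.g. a raw ADC reading of 120 with an offset of -8 -> 52000 millidegrees */
	printf("%d millidegrees\n", juniper_temp_millicelsius(120, 0x1f8));
	return 0;
}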
@ -1415,6 +1433,8 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
|
|||
case CHIP_CEDAR:
|
||||
case CHIP_REDWOOD:
|
||||
case CHIP_PALM:
|
||||
case CHIP_SUMO:
|
||||
case CHIP_SUMO2:
|
||||
case CHIP_TURKS:
|
||||
case CHIP_CAICOS:
|
||||
force_no_swizzle = false;
|
||||
|
@ -1544,6 +1564,8 @@ static void evergreen_program_channel_remap(struct radeon_device *rdev)
|
|||
case CHIP_REDWOOD:
|
||||
case CHIP_CEDAR:
|
||||
case CHIP_PALM:
|
||||
case CHIP_SUMO:
|
||||
case CHIP_SUMO2:
|
||||
case CHIP_TURKS:
|
||||
case CHIP_CAICOS:
|
||||
default:
|
||||
|
@ -1685,6 +1707,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
|
|||
rdev->config.evergreen.max_hw_contexts = 4;
|
||||
rdev->config.evergreen.sq_num_cf_insts = 1;
|
||||
|
||||
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
|
||||
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
|
||||
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
|
||||
break;
|
||||
case CHIP_SUMO:
|
||||
rdev->config.evergreen.num_ses = 1;
|
||||
rdev->config.evergreen.max_pipes = 4;
|
||||
rdev->config.evergreen.max_tile_pipes = 2;
|
||||
if (rdev->pdev->device == 0x9648)
|
||||
rdev->config.evergreen.max_simds = 3;
|
||||
else if ((rdev->pdev->device == 0x9647) ||
|
||||
(rdev->pdev->device == 0x964a))
|
||||
rdev->config.evergreen.max_simds = 4;
|
||||
else
|
||||
rdev->config.evergreen.max_simds = 5;
|
||||
rdev->config.evergreen.max_backends = 2 * rdev->config.evergreen.num_ses;
|
||||
rdev->config.evergreen.max_gprs = 256;
|
||||
rdev->config.evergreen.max_threads = 248;
|
||||
rdev->config.evergreen.max_gs_threads = 32;
|
||||
rdev->config.evergreen.max_stack_entries = 256;
|
||||
rdev->config.evergreen.sx_num_of_sets = 4;
|
||||
rdev->config.evergreen.sx_max_export_size = 256;
|
||||
rdev->config.evergreen.sx_max_export_pos_size = 64;
|
||||
rdev->config.evergreen.sx_max_export_smx_size = 192;
|
||||
rdev->config.evergreen.max_hw_contexts = 8;
|
||||
rdev->config.evergreen.sq_num_cf_insts = 2;
|
||||
|
||||
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
|
||||
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
|
||||
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
|
||||
break;
|
||||
case CHIP_SUMO2:
|
||||
rdev->config.evergreen.num_ses = 1;
|
||||
rdev->config.evergreen.max_pipes = 4;
|
||||
rdev->config.evergreen.max_tile_pipes = 4;
|
||||
rdev->config.evergreen.max_simds = 2;
|
||||
rdev->config.evergreen.max_backends = 1 * rdev->config.evergreen.num_ses;
|
||||
rdev->config.evergreen.max_gprs = 256;
|
||||
rdev->config.evergreen.max_threads = 248;
|
||||
rdev->config.evergreen.max_gs_threads = 32;
|
||||
rdev->config.evergreen.max_stack_entries = 512;
|
||||
rdev->config.evergreen.sx_num_of_sets = 4;
|
||||
rdev->config.evergreen.sx_max_export_size = 256;
|
||||
rdev->config.evergreen.sx_max_export_pos_size = 64;
|
||||
rdev->config.evergreen.sx_max_export_smx_size = 192;
|
||||
rdev->config.evergreen.max_hw_contexts = 8;
|
||||
rdev->config.evergreen.sq_num_cf_insts = 2;
|
||||
|
||||
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
|
||||
rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
|
||||
rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
|
||||
|
@ -2039,6 +2109,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
|
|||
switch (rdev->family) {
|
||||
case CHIP_CEDAR:
|
||||
case CHIP_PALM:
|
||||
case CHIP_SUMO:
|
||||
case CHIP_SUMO2:
|
||||
case CHIP_CAICOS:
|
||||
/* no vertex cache */
|
||||
sq_config &= ~VC_ENABLE;
|
||||
|
@ -2060,6 +2132,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
|
|||
switch (rdev->family) {
|
||||
case CHIP_CEDAR:
|
||||
case CHIP_PALM:
|
||||
case CHIP_SUMO:
|
||||
case CHIP_SUMO2:
|
||||
ps_thread_count = 96;
|
||||
break;
|
||||
default:
|
||||
|
@ -2099,6 +2173,8 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
|
|||
switch (rdev->family) {
|
||||
case CHIP_CEDAR:
|
||||
case CHIP_PALM:
|
||||
case CHIP_SUMO:
|
||||
case CHIP_SUMO2:
|
||||
case CHIP_CAICOS:
|
||||
vgt_cache_invalidation = CACHE_INVALIDATION(TC_ONLY);
|
||||
break;
|
||||
|
|
|
@ -31,6 +31,7 @@
|
|||
|
||||
#include "evergreend.h"
|
||||
#include "evergreen_blit_shaders.h"
|
||||
#include "cayman_blit_shaders.h"
|
||||
|
||||
#define DI_PT_RECTLIST 0x11
|
||||
#define DI_INDEX_SIZE_16_BIT 0x0
|
||||
|
@ -152,6 +153,8 @@ set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr)
|
|||
|
||||
if ((rdev->family == CHIP_CEDAR) ||
|
||||
(rdev->family == CHIP_PALM) ||
|
||||
(rdev->family == CHIP_SUMO) ||
|
||||
(rdev->family == CHIP_SUMO2) ||
|
||||
(rdev->family == CHIP_CAICOS))
|
||||
cp_set_surface_sync(rdev,
|
||||
PACKET3_TC_ACTION_ENA, 48, gpu_addr);
|
||||
|
@ -199,6 +202,16 @@ static void
|
|||
set_scissors(struct radeon_device *rdev, int x1, int y1,
|
||||
int x2, int y2)
|
||||
{
|
||||
/* workaround some hw bugs */
|
||||
if (x2 == 0)
|
||||
x1 = 1;
|
||||
if (y2 == 0)
|
||||
y1 = 1;
|
||||
if (rdev->family == CHIP_CAYMAN) {
|
||||
if ((x2 == 1) && (y2 == 1))
|
||||
x2 = 2;
|
||||
}
|
||||
|
||||
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
|
||||
radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_START) >> 2);
|
||||
radeon_ring_write(rdev, (x1 << 0) | (y1 << 16));
|
||||
|
@ -255,238 +268,284 @@ set_default_state(struct radeon_device *rdev)
|
|||
u64 gpu_addr;
|
||||
int dwords;
|
||||
|
||||
switch (rdev->family) {
|
||||
case CHIP_CEDAR:
|
||||
default:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 96;
|
||||
num_vs_threads = 16;
|
||||
num_gs_threads = 16;
|
||||
num_es_threads = 16;
|
||||
num_hs_threads = 16;
|
||||
num_ls_threads = 16;
|
||||
num_ps_stack_entries = 42;
|
||||
num_vs_stack_entries = 42;
|
||||
num_gs_stack_entries = 42;
|
||||
num_es_stack_entries = 42;
|
||||
num_hs_stack_entries = 42;
|
||||
num_ls_stack_entries = 42;
|
||||
break;
|
||||
case CHIP_REDWOOD:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 128;
|
||||
num_vs_threads = 20;
|
||||
num_gs_threads = 20;
|
||||
num_es_threads = 20;
|
||||
num_hs_threads = 20;
|
||||
num_ls_threads = 20;
|
||||
num_ps_stack_entries = 42;
|
||||
num_vs_stack_entries = 42;
|
||||
num_gs_stack_entries = 42;
|
||||
num_es_stack_entries = 42;
|
||||
num_hs_stack_entries = 42;
|
||||
num_ls_stack_entries = 42;
|
||||
break;
|
||||
case CHIP_JUNIPER:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 128;
|
||||
num_vs_threads = 20;
|
||||
num_gs_threads = 20;
|
||||
num_es_threads = 20;
|
||||
num_hs_threads = 20;
|
||||
num_ls_threads = 20;
|
||||
num_ps_stack_entries = 85;
|
||||
num_vs_stack_entries = 85;
|
||||
num_gs_stack_entries = 85;
|
||||
num_es_stack_entries = 85;
|
||||
num_hs_stack_entries = 85;
|
||||
num_ls_stack_entries = 85;
|
||||
break;
|
||||
case CHIP_CYPRESS:
|
||||
case CHIP_HEMLOCK:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 128;
|
||||
num_vs_threads = 20;
|
||||
num_gs_threads = 20;
|
||||
num_es_threads = 20;
|
||||
num_hs_threads = 20;
|
||||
num_ls_threads = 20;
|
||||
num_ps_stack_entries = 85;
|
||||
num_vs_stack_entries = 85;
|
||||
num_gs_stack_entries = 85;
|
||||
num_es_stack_entries = 85;
|
||||
num_hs_stack_entries = 85;
|
||||
num_ls_stack_entries = 85;
|
||||
break;
|
||||
case CHIP_PALM:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 96;
|
||||
num_vs_threads = 16;
|
||||
num_gs_threads = 16;
|
||||
num_es_threads = 16;
|
||||
num_hs_threads = 16;
|
||||
num_ls_threads = 16;
|
||||
num_ps_stack_entries = 42;
|
||||
num_vs_stack_entries = 42;
|
||||
num_gs_stack_entries = 42;
|
||||
num_es_stack_entries = 42;
|
||||
num_hs_stack_entries = 42;
|
||||
num_ls_stack_entries = 42;
|
||||
break;
|
||||
case CHIP_BARTS:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 128;
|
||||
num_vs_threads = 20;
|
||||
num_gs_threads = 20;
|
||||
num_es_threads = 20;
|
||||
num_hs_threads = 20;
|
||||
num_ls_threads = 20;
|
||||
num_ps_stack_entries = 85;
|
||||
num_vs_stack_entries = 85;
|
||||
num_gs_stack_entries = 85;
|
||||
num_es_stack_entries = 85;
|
||||
num_hs_stack_entries = 85;
|
||||
num_ls_stack_entries = 85;
|
||||
break;
|
||||
case CHIP_TURKS:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 128;
|
||||
num_vs_threads = 20;
|
||||
num_gs_threads = 20;
|
||||
num_es_threads = 20;
|
||||
num_hs_threads = 20;
|
||||
num_ls_threads = 20;
|
||||
num_ps_stack_entries = 42;
|
||||
num_vs_stack_entries = 42;
|
||||
num_gs_stack_entries = 42;
|
||||
num_es_stack_entries = 42;
|
||||
num_hs_stack_entries = 42;
|
||||
num_ls_stack_entries = 42;
|
||||
break;
|
||||
case CHIP_CAICOS:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 128;
|
||||
num_vs_threads = 10;
|
||||
num_gs_threads = 10;
|
||||
num_es_threads = 10;
|
||||
num_hs_threads = 10;
|
||||
num_ls_threads = 10;
|
||||
num_ps_stack_entries = 42;
|
||||
num_vs_stack_entries = 42;
|
||||
num_gs_stack_entries = 42;
|
||||
num_es_stack_entries = 42;
|
||||
num_hs_stack_entries = 42;
|
||||
num_ls_stack_entries = 42;
|
||||
break;
|
||||
}
|
||||
|
||||
if ((rdev->family == CHIP_CEDAR) ||
|
||||
(rdev->family == CHIP_PALM) ||
|
||||
(rdev->family == CHIP_CAICOS))
|
||||
sq_config = 0;
|
||||
else
|
||||
sq_config = VC_ENABLE;
|
||||
|
||||
sq_config |= (EXPORT_SRC_C |
|
||||
CS_PRIO(0) |
|
||||
LS_PRIO(0) |
|
||||
HS_PRIO(0) |
|
||||
PS_PRIO(0) |
|
||||
VS_PRIO(1) |
|
||||
GS_PRIO(2) |
|
||||
ES_PRIO(3));
|
||||
|
||||
sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
|
||||
NUM_VS_GPRS(num_vs_gprs) |
|
||||
NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
|
||||
sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
|
||||
NUM_ES_GPRS(num_es_gprs));
|
||||
sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
|
||||
NUM_LS_GPRS(num_ls_gprs));
|
||||
sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
|
||||
NUM_VS_THREADS(num_vs_threads) |
|
||||
NUM_GS_THREADS(num_gs_threads) |
|
||||
NUM_ES_THREADS(num_es_threads));
|
||||
sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
|
||||
NUM_LS_THREADS(num_ls_threads));
|
||||
sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
|
||||
NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
|
||||
sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
|
||||
NUM_ES_STACK_ENTRIES(num_es_stack_entries));
|
||||
sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
|
||||
NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
|
||||
|
||||
/* set clear context state */
|
||||
radeon_ring_write(rdev, PACKET3(PACKET3_CLEAR_STATE, 0));
|
||||
radeon_ring_write(rdev, 0);
|
||||
|
||||
/* disable dyn gprs */
|
||||
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
|
||||
radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
|
||||
radeon_ring_write(rdev, 0);
|
||||
if (rdev->family < CHIP_CAYMAN) {
|
||||
switch (rdev->family) {
|
||||
case CHIP_CEDAR:
|
||||
default:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 96;
|
||||
num_vs_threads = 16;
|
||||
num_gs_threads = 16;
|
||||
num_es_threads = 16;
|
||||
num_hs_threads = 16;
|
||||
num_ls_threads = 16;
|
||||
num_ps_stack_entries = 42;
|
||||
num_vs_stack_entries = 42;
|
||||
num_gs_stack_entries = 42;
|
||||
num_es_stack_entries = 42;
|
||||
num_hs_stack_entries = 42;
|
||||
num_ls_stack_entries = 42;
|
||||
break;
|
||||
case CHIP_REDWOOD:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 128;
|
||||
num_vs_threads = 20;
|
||||
num_gs_threads = 20;
|
||||
num_es_threads = 20;
|
||||
num_hs_threads = 20;
|
||||
num_ls_threads = 20;
|
||||
num_ps_stack_entries = 42;
|
||||
num_vs_stack_entries = 42;
|
||||
num_gs_stack_entries = 42;
|
||||
num_es_stack_entries = 42;
|
||||
num_hs_stack_entries = 42;
|
||||
num_ls_stack_entries = 42;
|
||||
break;
|
||||
case CHIP_JUNIPER:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 128;
|
||||
num_vs_threads = 20;
|
||||
num_gs_threads = 20;
|
||||
num_es_threads = 20;
|
||||
num_hs_threads = 20;
|
||||
num_ls_threads = 20;
|
||||
num_ps_stack_entries = 85;
|
||||
num_vs_stack_entries = 85;
|
||||
num_gs_stack_entries = 85;
|
||||
num_es_stack_entries = 85;
|
||||
num_hs_stack_entries = 85;
|
||||
num_ls_stack_entries = 85;
|
||||
break;
|
||||
case CHIP_CYPRESS:
|
||||
case CHIP_HEMLOCK:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 128;
|
||||
num_vs_threads = 20;
|
||||
num_gs_threads = 20;
|
||||
num_es_threads = 20;
|
||||
num_hs_threads = 20;
|
||||
num_ls_threads = 20;
|
||||
num_ps_stack_entries = 85;
|
||||
num_vs_stack_entries = 85;
|
||||
num_gs_stack_entries = 85;
|
||||
num_es_stack_entries = 85;
|
||||
num_hs_stack_entries = 85;
|
||||
num_ls_stack_entries = 85;
|
||||
break;
|
||||
case CHIP_PALM:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 96;
|
||||
num_vs_threads = 16;
|
||||
num_gs_threads = 16;
|
||||
num_es_threads = 16;
|
||||
num_hs_threads = 16;
|
||||
num_ls_threads = 16;
|
||||
num_ps_stack_entries = 42;
|
||||
num_vs_stack_entries = 42;
|
||||
num_gs_stack_entries = 42;
|
||||
num_es_stack_entries = 42;
|
||||
num_hs_stack_entries = 42;
|
||||
num_ls_stack_entries = 42;
|
||||
break;
|
||||
case CHIP_SUMO:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 96;
|
||||
num_vs_threads = 25;
|
||||
num_gs_threads = 25;
|
||||
num_es_threads = 25;
|
||||
num_hs_threads = 25;
|
||||
num_ls_threads = 25;
|
||||
num_ps_stack_entries = 42;
|
||||
num_vs_stack_entries = 42;
|
||||
num_gs_stack_entries = 42;
|
||||
num_es_stack_entries = 42;
|
||||
num_hs_stack_entries = 42;
|
||||
num_ls_stack_entries = 42;
|
||||
break;
|
||||
case CHIP_SUMO2:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 96;
|
||||
num_vs_threads = 25;
|
||||
num_gs_threads = 25;
|
||||
num_es_threads = 25;
|
||||
num_hs_threads = 25;
|
||||
num_ls_threads = 25;
|
||||
num_ps_stack_entries = 85;
|
||||
num_vs_stack_entries = 85;
|
||||
num_gs_stack_entries = 85;
|
||||
num_es_stack_entries = 85;
|
||||
num_hs_stack_entries = 85;
|
||||
num_ls_stack_entries = 85;
|
||||
break;
|
||||
case CHIP_BARTS:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 128;
|
||||
num_vs_threads = 20;
|
||||
num_gs_threads = 20;
|
||||
num_es_threads = 20;
|
||||
num_hs_threads = 20;
|
||||
num_ls_threads = 20;
|
||||
num_ps_stack_entries = 85;
|
||||
num_vs_stack_entries = 85;
|
||||
num_gs_stack_entries = 85;
|
||||
num_es_stack_entries = 85;
|
||||
num_hs_stack_entries = 85;
|
||||
num_ls_stack_entries = 85;
|
||||
break;
|
||||
case CHIP_TURKS:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 128;
|
||||
num_vs_threads = 20;
|
||||
num_gs_threads = 20;
|
||||
num_es_threads = 20;
|
||||
num_hs_threads = 20;
|
||||
num_ls_threads = 20;
|
||||
num_ps_stack_entries = 42;
|
||||
num_vs_stack_entries = 42;
|
||||
num_gs_stack_entries = 42;
|
||||
num_es_stack_entries = 42;
|
||||
num_hs_stack_entries = 42;
|
||||
num_ls_stack_entries = 42;
|
||||
break;
|
||||
case CHIP_CAICOS:
|
||||
num_ps_gprs = 93;
|
||||
num_vs_gprs = 46;
|
||||
num_temp_gprs = 4;
|
||||
num_gs_gprs = 31;
|
||||
num_es_gprs = 31;
|
||||
num_hs_gprs = 23;
|
||||
num_ls_gprs = 23;
|
||||
num_ps_threads = 128;
|
||||
num_vs_threads = 10;
|
||||
num_gs_threads = 10;
|
||||
num_es_threads = 10;
|
||||
num_hs_threads = 10;
|
||||
num_ls_threads = 10;
|
||||
num_ps_stack_entries = 42;
|
||||
num_vs_stack_entries = 42;
|
||||
num_gs_stack_entries = 42;
|
||||
num_es_stack_entries = 42;
|
||||
num_hs_stack_entries = 42;
|
||||
num_ls_stack_entries = 42;
|
||||
break;
|
||||
}
|
||||
|
||||
/* SQ config */
|
||||
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
|
||||
radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
|
||||
radeon_ring_write(rdev, sq_config);
|
||||
radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
|
||||
radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
|
||||
radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
|
||||
radeon_ring_write(rdev, 0);
|
||||
radeon_ring_write(rdev, 0);
|
||||
radeon_ring_write(rdev, sq_thread_resource_mgmt);
|
||||
radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
|
||||
radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
|
||||
radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
|
||||
radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
|
||||
if ((rdev->family == CHIP_CEDAR) ||
|
||||
(rdev->family == CHIP_PALM) ||
|
||||
(rdev->family == CHIP_SUMO) ||
|
||||
(rdev->family == CHIP_SUMO2) ||
|
||||
(rdev->family == CHIP_CAICOS))
|
||||
sq_config = 0;
|
||||
else
|
||||
sq_config = VC_ENABLE;
|
||||
|
||||
sq_config |= (EXPORT_SRC_C |
|
||||
CS_PRIO(0) |
|
||||
LS_PRIO(0) |
|
||||
HS_PRIO(0) |
|
||||
PS_PRIO(0) |
|
||||
VS_PRIO(1) |
|
||||
GS_PRIO(2) |
|
||||
ES_PRIO(3));
|
||||
|
||||
sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) |
|
||||
NUM_VS_GPRS(num_vs_gprs) |
|
||||
NUM_CLAUSE_TEMP_GPRS(num_temp_gprs));
|
||||
sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) |
|
||||
NUM_ES_GPRS(num_es_gprs));
|
||||
sq_gpr_resource_mgmt_3 = (NUM_HS_GPRS(num_hs_gprs) |
|
||||
NUM_LS_GPRS(num_ls_gprs));
|
||||
sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) |
|
||||
NUM_VS_THREADS(num_vs_threads) |
|
||||
NUM_GS_THREADS(num_gs_threads) |
|
||||
NUM_ES_THREADS(num_es_threads));
|
||||
sq_thread_resource_mgmt_2 = (NUM_HS_THREADS(num_hs_threads) |
|
||||
NUM_LS_THREADS(num_ls_threads));
|
||||
sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) |
|
||||
NUM_VS_STACK_ENTRIES(num_vs_stack_entries));
|
||||
sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) |
|
||||
NUM_ES_STACK_ENTRIES(num_es_stack_entries));
|
||||
sq_stack_resource_mgmt_3 = (NUM_HS_STACK_ENTRIES(num_hs_stack_entries) |
|
||||
NUM_LS_STACK_ENTRIES(num_ls_stack_entries));
|
||||
|
||||
/* disable dyn gprs */
|
||||
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
|
||||
radeon_ring_write(rdev, (SQ_DYN_GPR_CNTL_PS_FLUSH_REQ - PACKET3_SET_CONFIG_REG_START) >> 2);
|
||||
radeon_ring_write(rdev, 0);
|
||||
|
||||
/* SQ config */
|
||||
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 11));
|
||||
radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_START) >> 2);
|
||||
radeon_ring_write(rdev, sq_config);
|
||||
radeon_ring_write(rdev, sq_gpr_resource_mgmt_1);
|
||||
radeon_ring_write(rdev, sq_gpr_resource_mgmt_2);
|
||||
radeon_ring_write(rdev, sq_gpr_resource_mgmt_3);
|
||||
radeon_ring_write(rdev, 0);
|
||||
radeon_ring_write(rdev, 0);
|
||||
radeon_ring_write(rdev, sq_thread_resource_mgmt);
|
||||
radeon_ring_write(rdev, sq_thread_resource_mgmt_2);
|
||||
radeon_ring_write(rdev, sq_stack_resource_mgmt_1);
|
||||
radeon_ring_write(rdev, sq_stack_resource_mgmt_2);
|
||||
radeon_ring_write(rdev, sq_stack_resource_mgmt_3);
|
||||
}
|
||||
|
||||
/* CONTEXT_CONTROL */
|
||||
radeon_ring_write(rdev, 0xc0012800);
|
||||
|
@@ -560,7 +619,10 @@ int evergreen_blit_init(struct radeon_device *rdev)
mutex_init(&rdev->r600_blit.mutex);
rdev->r600_blit.state_offset = 0;

rdev->r600_blit.state_len = evergreen_default_size;
if (rdev->family < CHIP_CAYMAN)
rdev->r600_blit.state_len = evergreen_default_size;
else
rdev->r600_blit.state_len = cayman_default_size;

dwords = rdev->r600_blit.state_len;
while (dwords & 0xf) {

@@ -572,11 +634,17 @@ int evergreen_blit_init(struct radeon_device *rdev)
obj_size = ALIGN(obj_size, 256);

rdev->r600_blit.vs_offset = obj_size;
obj_size += evergreen_vs_size * 4;
if (rdev->family < CHIP_CAYMAN)
obj_size += evergreen_vs_size * 4;
else
obj_size += cayman_vs_size * 4;
obj_size = ALIGN(obj_size, 256);

rdev->r600_blit.ps_offset = obj_size;
obj_size += evergreen_ps_size * 4;
if (rdev->family < CHIP_CAYMAN)
obj_size += evergreen_ps_size * 4;
else
obj_size += cayman_ps_size * 4;
obj_size = ALIGN(obj_size, 256);

r = radeon_bo_create(rdev, obj_size, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,

@@ -599,16 +667,29 @@ int evergreen_blit_init(struct radeon_device *rdev)
return r;
}

memcpy_toio(ptr + rdev->r600_blit.state_offset,
evergreen_default_state, rdev->r600_blit.state_len * 4);
if (rdev->family < CHIP_CAYMAN) {
memcpy_toio(ptr + rdev->r600_blit.state_offset,
evergreen_default_state, rdev->r600_blit.state_len * 4);

if (num_packet2s)
memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
packet2s, num_packet2s * 4);
for (i = 0; i < evergreen_vs_size; i++)
*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
for (i = 0; i < evergreen_ps_size; i++)
*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
if (num_packet2s)
memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
packet2s, num_packet2s * 4);
for (i = 0; i < evergreen_vs_size; i++)
*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(evergreen_vs[i]);
for (i = 0; i < evergreen_ps_size; i++)
*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(evergreen_ps[i]);
} else {
memcpy_toio(ptr + rdev->r600_blit.state_offset,
cayman_default_state, rdev->r600_blit.state_len * 4);

if (num_packet2s)
memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
packet2s, num_packet2s * 4);
for (i = 0; i < cayman_vs_size; i++)
*(u32 *)((unsigned long)ptr + rdev->r600_blit.vs_offset + i * 4) = cpu_to_le32(cayman_vs[i]);
for (i = 0; i < cayman_ps_size; i++)
*(u32 *)((unsigned long)ptr + rdev->r600_blit.ps_offset + i * 4) = cpu_to_le32(cayman_ps[i]);
}
radeon_bo_kunmap(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
@@ -168,10 +168,16 @@
#define SE_DB_BUSY (1 << 30)
#define SE_CB_BUSY (1 << 31)
/* evergreen */
#define CG_THERMAL_CTRL 0x72c
#define TOFFSET_MASK 0x00003FE0
#define TOFFSET_SHIFT 5
#define CG_MULT_THERMAL_STATUS 0x740
#define ASIC_T(x) ((x) << 16)
#define ASIC_T_MASK 0x7FF0000
#define ASIC_T_MASK 0x07FF0000
#define ASIC_T_SHIFT 16
#define CG_TS0_STATUS 0x760
#define TS0_ADC_DOUT_MASK 0x000003FF
#define TS0_ADC_DOUT_SHIFT 0
/* APU */
#define CG_THERMAL_STATUS 0x678
@@ -1387,14 +1387,12 @@ static int cayman_startup(struct radeon_device *rdev)
return r;
cayman_gpu_init(rdev);

#if 0
r = cayman_blit_init(rdev);
r = evergreen_blit_init(rdev);
if (r) {
cayman_blit_fini(rdev);
evergreen_blit_fini(rdev);
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
#endif

/* allocate wb buffer */
r = radeon_wb_init(rdev);

@@ -1452,7 +1450,7 @@ int cayman_resume(struct radeon_device *rdev)

int cayman_suspend(struct radeon_device *rdev)
{
/* int r; */
int r;

/* FIXME: we should wait for ring to be empty */
cayman_cp_enable(rdev, false);

@@ -1461,14 +1459,13 @@ int cayman_suspend(struct radeon_device *rdev)
radeon_wb_disable(rdev);
cayman_pcie_gart_disable(rdev);

#if 0
/* unpin shaders bo */
r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
if (likely(r == 0)) {
radeon_bo_unpin(rdev->r600_blit.shader_obj);
radeon_bo_unreserve(rdev->r600_blit.shader_obj);
}
#endif

return 0;
}

@@ -1580,7 +1577,7 @@ int cayman_init(struct radeon_device *rdev)

void cayman_fini(struct radeon_device *rdev)
{
/* cayman_blit_fini(rdev); */
evergreen_blit_fini(rdev);
cayman_cp_fini(rdev);
r600_irq_fini(rdev);
radeon_wb_fini(rdev);
@@ -87,6 +87,10 @@ MODULE_FIRMWARE("radeon/CYPRESS_rlc.bin");
MODULE_FIRMWARE("radeon/PALM_pfp.bin");
MODULE_FIRMWARE("radeon/PALM_me.bin");
MODULE_FIRMWARE("radeon/SUMO_rlc.bin");
MODULE_FIRMWARE("radeon/SUMO_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO_me.bin");
MODULE_FIRMWARE("radeon/SUMO2_pfp.bin");
MODULE_FIRMWARE("radeon/SUMO2_me.bin");

int r600_debugfs_mc_info_init(struct radeon_device *rdev);

@@ -2024,6 +2028,14 @@ int r600_init_microcode(struct radeon_device *rdev)
chip_name = "PALM";
rlc_chip_name = "SUMO";
break;
case CHIP_SUMO:
chip_name = "SUMO";
rlc_chip_name = "SUMO";
break;
case CHIP_SUMO2:
chip_name = "SUMO2";
rlc_chip_name = "SUMO";
break;
default: BUG();
}
@@ -71,20 +71,21 @@ struct r600_cs_track {
u64 db_bo_mc;
};

#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc }
#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc }
#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0 }
#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc }
#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0 }
#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc }
#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16, vc }
#define FMT_8_BIT(fmt, vc) [fmt] = { 1, 1, 1, vc, CHIP_R600 }
#define FMT_16_BIT(fmt, vc) [fmt] = { 1, 1, 2, vc, CHIP_R600 }
#define FMT_24_BIT(fmt) [fmt] = { 1, 1, 3, 0, CHIP_R600 }
#define FMT_32_BIT(fmt, vc) [fmt] = { 1, 1, 4, vc, CHIP_R600 }
#define FMT_48_BIT(fmt) [fmt] = { 1, 1, 6, 0, CHIP_R600 }
#define FMT_64_BIT(fmt, vc) [fmt] = { 1, 1, 8, vc, CHIP_R600 }
#define FMT_96_BIT(fmt) [fmt] = { 1, 1, 12, 0, CHIP_R600 }
#define FMT_128_BIT(fmt, vc) [fmt] = { 1, 1, 16,vc, CHIP_R600 }

struct gpu_formats {
unsigned blockwidth;
unsigned blockheight;
unsigned blocksize;
unsigned valid_color;
enum radeon_family min_family;
};

static const struct gpu_formats color_formats_table[] = {

@@ -154,7 +155,11 @@ static const struct gpu_formats color_formats_table[] = {
[V_038004_FMT_BC3] = { 4, 4, 16, 0 },
[V_038004_FMT_BC4] = { 4, 4, 8, 0 },
[V_038004_FMT_BC5] = { 4, 4, 16, 0},
[V_038004_FMT_BC6] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */
[V_038004_FMT_BC7] = { 4, 4, 16, 0, CHIP_CEDAR}, /* Evergreen-only */

/* The other Evergreen formats */
[V_038004_FMT_32_AS_32_32_32_32] = { 1, 1, 4, 0, CHIP_CEDAR},
};

static inline bool fmt_is_valid_color(u32 format)

@@ -168,11 +173,14 @@ static inline bool fmt_is_valid_color(u32 format)
return false;
}

static inline bool fmt_is_valid_texture(u32 format)
static inline bool fmt_is_valid_texture(u32 format, enum radeon_family family)
{
if (format >= ARRAY_SIZE(color_formats_table))
return false;

if (family < color_formats_table[format].min_family)
return false;

if (color_formats_table[format].blockwidth > 0)
return true;

@@ -1325,7 +1333,7 @@ static inline int r600_check_texture_resource(struct radeon_cs_parser *p, u32 i
return -EINVAL;
}
format = G_038004_DATA_FORMAT(word1);
if (!fmt_is_valid_texture(format)) {
if (!fmt_is_valid_texture(format, p->family)) {
dev_warn(p->dev, "%s:%d texture invalid format %d\n",
__func__, __LINE__, format);
return -EINVAL;
@@ -1309,6 +1309,9 @@
#define V_038004_FMT_BC3 0x00000033
#define V_038004_FMT_BC4 0x00000034
#define V_038004_FMT_BC5 0x00000035
#define V_038004_FMT_BC6 0x00000036
#define V_038004_FMT_BC7 0x00000037
#define V_038004_FMT_32_AS_32_32_32_32 0x00000038
#define R_038010_SQ_TEX_RESOURCE_WORD4_0 0x038010
#define S_038010_FORMAT_COMP_X(x) (((x) & 0x3) << 0)
#define G_038010_FORMAT_COMP_X(x) (((x) >> 0) & 0x3)
@@ -906,9 +906,9 @@ static struct radeon_asic cayman_asic = {
.get_vblank_counter = &evergreen_get_vblank_counter,
.fence_ring_emit = &r600_fence_ring_emit,
.cs_parse = &evergreen_cs_parse,
.copy_blit = NULL,
.copy_dma = NULL,
.copy = NULL,
.copy_blit = &evergreen_copy_blit,
.copy_dma = &evergreen_copy_blit,
.copy = &evergreen_copy_blit,
.get_engine_clock = &radeon_atom_get_engine_clock,
.set_engine_clock = &radeon_atom_set_engine_clock,
.get_memory_clock = &radeon_atom_get_memory_clock,

@@ -1020,6 +1020,8 @@ int radeon_asic_init(struct radeon_device *rdev)
rdev->asic = &evergreen_asic;
break;
case CHIP_PALM:
case CHIP_SUMO:
case CHIP_SUMO2:
rdev->asic = &sumo_asic;
break;
case CHIP_BARTS:
@@ -228,6 +228,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
parser.filp = filp;
parser.rdev = rdev;
parser.dev = rdev->dev;
parser.family = rdev->family;
r = radeon_cs_parser_init(&parser, data);
if (r) {
DRM_ERROR("Failed to initialize parser !\n");
@@ -82,6 +82,8 @@ static const char radeon_family_name[][16] = {
"CYPRESS",
"HEMLOCK",
"PALM",
"SUMO",
"SUMO2",
"BARTS",
"TURKS",
"CAICOS",

@@ -752,6 +754,7 @@ int radeon_device_init(struct radeon_device *rdev,
dma_bits = rdev->need_dma32 ? 32 : 40;
r = pci_set_dma_mask(rdev->pdev, DMA_BIT_MASK(dma_bits));
if (r) {
rdev->need_dma32 = true;
printk(KERN_WARNING "radeon: No suitable DMA available.\n");
}
@@ -264,6 +264,8 @@ static void radeon_unpin_work_func(struct work_struct *__work)
radeon_bo_unreserve(work->old_rbo);
} else
DRM_ERROR("failed to reserve buffer after flip\n");

drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
kfree(work);
}

@@ -371,6 +373,8 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
new_radeon_fb = to_radeon_framebuffer(fb);
/* schedule unpin of the old buffer */
obj = old_radeon_fb->obj;
/* take a reference to the old object */
drm_gem_object_reference(obj);
rbo = gem_to_radeon_bo(obj);
work->old_rbo = rbo;
INIT_WORK(&work->work, radeon_unpin_work_func);

@@ -378,12 +382,9 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
/* We borrow the event spin lock for protecting unpin_work */
spin_lock_irqsave(&dev->event_lock, flags);
if (radeon_crtc->unpin_work) {
spin_unlock_irqrestore(&dev->event_lock, flags);
kfree(work);
radeon_fence_unref(&fence);

DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
return -EBUSY;
r = -EBUSY;
goto unlock_free;
}
radeon_crtc->unpin_work = work;
radeon_crtc->deferred_flip_completion = 0;

@@ -497,6 +498,8 @@ pflip_cleanup1:
pflip_cleanup:
spin_lock_irqsave(&dev->event_lock, flags);
radeon_crtc->unpin_work = NULL;
unlock_free:
drm_gem_object_unreference_unlocked(old_radeon_fb->obj);
spin_unlock_irqrestore(&dev->event_lock, flags);
radeon_fence_unref(&fence);
kfree(work);
@@ -113,7 +113,7 @@ int radeon_benchmarking = 0;
int radeon_testing = 0;
int radeon_connector_table = 0;
int radeon_tv = 1;
int radeon_audio = 1;
int radeon_audio = 0;
int radeon_disp_priority = 0;
int radeon_hw_i2c = 0;
int radeon_pcie_gen2 = 0;

@@ -151,7 +151,7 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
module_param_named(tv, radeon_tv, int, 0444);

MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
MODULE_PARM_DESC(audio, "Audio enable (1 = enable)");
module_param_named(audio, radeon_audio, int, 0444);

MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
@@ -954,10 +954,15 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
int dp_lane_count = 0;
int connector_object_id = 0;
int igp_lane_info = 0;
int dig_encoder = dig->dig_encoder;

if (action == ATOM_TRANSMITTER_ACTION_INIT)
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
connector = radeon_get_connector_for_encoder_init(encoder);
else
/* just needed to avoid bailing in the encoder check. the encoder
* isn't used for init
*/
dig_encoder = 0;
} else
connector = radeon_get_connector_for_encoder(encoder);

if (connector) {

@@ -973,7 +978,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}

/* no dig encoder assigned */
if (dig->dig_encoder == -1)
if (dig_encoder == -1)
return;

if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)

@@ -1023,7 +1028,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t

if (dig->linkb)
args.v3.acConfig.ucLinkSel = 1;
if (dig->dig_encoder & 1)
if (dig_encoder & 1)
args.v3.acConfig.ucEncoderSel = 1;

/* Select the PLL for the PHY

@@ -1073,7 +1078,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v3.acConfig.fDualLinkConnector = 1;
}
} else if (ASIC_IS_DCE32(rdev)) {
args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
args.v2.acConfig.ucEncoderSel = dig_encoder;
if (dig->linkb)
args.v2.acConfig.ucLinkSel = 1;

@@ -1100,7 +1105,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
} else {
args.v1.ucConfig = ATOM_TRANSMITTER_CONFIG_CLKSRC_PPLL;

if (dig->dig_encoder)
if (dig_encoder)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
@@ -81,6 +81,8 @@ enum radeon_family {
CHIP_CYPRESS,
CHIP_HEMLOCK,
CHIP_PALM,
CHIP_SUMO,
CHIP_SUMO2,
CHIP_BARTS,
CHIP_TURKS,
CHIP_CAICOS,
@@ -487,6 +487,7 @@ static int radeon_hwmon_init(struct radeon_device *rdev)
case THERMAL_TYPE_RV6XX:
case THERMAL_TYPE_RV770:
case THERMAL_TYPE_EVERGREEN:
case THERMAL_TYPE_NI:
case THERMAL_TYPE_SUMO:
rdev->pm.int_hwmon_dev = hwmon_device_register(rdev->dev);
if (IS_ERR(rdev->pm.int_hwmon_dev)) {
@@ -758,6 +758,5 @@ r600 0x9400
0x00009714 VC_ENHANCE
0x00009830 DB_DEBUG
0x00009838 DB_WATERMARKS
0x00028D28 DB_SRESULTS_COMPARE_STATE0
0x00028D44 DB_ALPHA_TO_MASK
0x00009700 VC_CNTL
@@ -60,8 +60,6 @@ static int anysee_ctrl_msg(struct dvb_usb_device *d, u8 *sbuf, u8 slen,
int act_len, ret;
u8 buf[64];

if (slen > sizeof(buf))
slen = sizeof(buf);
memcpy(&buf[0], sbuf, slen);
buf[60] = state->seq++;

@@ -180,30 +178,37 @@ static int anysee_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msg,
{
struct dvb_usb_device *d = i2c_get_adapdata(adap);
int ret = 0, inc, i = 0;
u8 buf[52]; /* 4 + 48 (I2C WR USB command header + I2C WR max) */

if (mutex_lock_interruptible(&d->i2c_mutex) < 0)
return -EAGAIN;

while (i < num) {
if (num > i + 1 && (msg[i+1].flags & I2C_M_RD)) {
u8 buf[6];
if (msg[i].len > 2 || msg[i+1].len > 60) {
ret = -EOPNOTSUPP;
break;
}
buf[0] = CMD_I2C_READ;
buf[1] = (msg[i].addr << 1) | 0x01;
buf[2] = msg[i].buf[0];
buf[3] = msg[i].buf[1];
buf[4] = msg[i].len-1;
buf[5] = msg[i+1].len;
ret = anysee_ctrl_msg(d, buf, sizeof(buf), msg[i+1].buf,
ret = anysee_ctrl_msg(d, buf, 6, msg[i+1].buf,
msg[i+1].len);
inc = 2;
} else {
u8 buf[4+msg[i].len];
if (msg[i].len > 48) {
ret = -EOPNOTSUPP;
break;
}
buf[0] = CMD_I2C_WRITE;
buf[1] = (msg[i].addr << 1);
buf[2] = msg[i].len;
buf[3] = 0x01;
memcpy(&buf[4], msg[i].buf, msg[i].len);
ret = anysee_ctrl_msg(d, buf, sizeof(buf), NULL, 0);
ret = anysee_ctrl_msg(d, buf, 4 + msg[i].len, NULL, 0);
inc = 1;
}
if (ret)
@@ -213,14 +213,14 @@ int __must_check media_devnode_register(struct media_devnode *mdev)

/* Part 1: Find a free minor number */
mutex_lock(&media_devnode_lock);
minor = find_next_zero_bit(media_devnode_nums, 0, MEDIA_NUM_DEVICES);
minor = find_next_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES, 0);
if (minor == MEDIA_NUM_DEVICES) {
mutex_unlock(&media_devnode_lock);
printk(KERN_ERR "could not get a free minor\n");
return -ENFILE;
}

set_bit(mdev->minor, media_devnode_nums);
set_bit(minor, media_devnode_nums);
mutex_unlock(&media_devnode_lock);

mdev->minor = minor;
@@ -1,116 +0,0 @@
/*
* Auto gain algorithm for camera's with a coarse exposure control
*
* Copyright (C) 2010 Hans de Goede <hdegoede@redhat.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/

/* Autogain + exposure algorithm for cameras with a coarse exposure control
(usually this means we can only control the clockdiv to change exposure)
As changing the clockdiv so that the fps drops from 30 to 15 fps for
example, will lead to a huge exposure change (it effectively doubles),
this algorithm normally tries to only adjust the gain (between 40 and
80 %) and if that does not help, only then changes exposure. This leads
to a much more stable image then using the knee algorithm which at
certain points of the knee graph will only try to adjust exposure,
which leads to oscilating as one exposure step is huge.

Note this assumes that the sd struct for the cam in question has
exp_too_high_cnt and exp_too_high_cnt int members for use by this function.

Returns 0 if no changes were made, 1 if the gain and or exposure settings
where changed. */
static int gspca_coarse_grained_expo_autogain(struct gspca_dev *gspca_dev,
int avg_lum, int desired_avg_lum, int deadzone)
{
int i, steps, gain, orig_gain, exposure, orig_exposure;
int gain_low, gain_high;
const struct ctrl *gain_ctrl = NULL;
const struct ctrl *exposure_ctrl = NULL;
struct sd *sd = (struct sd *) gspca_dev;
int retval = 0;

for (i = 0; i < gspca_dev->sd_desc->nctrls; i++) {
if (gspca_dev->ctrl_dis & (1 << i))
continue;
if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_GAIN)
gain_ctrl = &gspca_dev->sd_desc->ctrls[i];
if (gspca_dev->sd_desc->ctrls[i].qctrl.id == V4L2_CID_EXPOSURE)
exposure_ctrl = &gspca_dev->sd_desc->ctrls[i];
}
if (!gain_ctrl || !exposure_ctrl) {
PDEBUG(D_ERR, "Error: gspca_coarse_grained_expo_autogain "
"called on cam without gain or exposure");
return 0;
}

if (gain_ctrl->get(gspca_dev, &gain) ||
exposure_ctrl->get(gspca_dev, &exposure))
return 0;

orig_gain = gain;
orig_exposure = exposure;
gain_low =
(gain_ctrl->qctrl.maximum - gain_ctrl->qctrl.minimum) / 5 * 2;
gain_low += gain_ctrl->qctrl.minimum;
gain_high =
(gain_ctrl->qctrl.maximum - gain_ctrl->qctrl.minimum) / 5 * 4;
gain_high += gain_ctrl->qctrl.minimum;

/* If we are of a multiple of deadzone, do multiple steps to reach the
desired lumination fast (with the risc of a slight overshoot) */
steps = (desired_avg_lum - avg_lum) / deadzone;

PDEBUG(D_FRAM, "autogain: lum: %d, desired: %d, steps: %d",
avg_lum, desired_avg_lum, steps);

if ((gain + steps) > gain_high &&
sd->exposure < exposure_ctrl->qctrl.maximum) {
gain = gain_high;
sd->exp_too_low_cnt++;
} else if ((gain + steps) < gain_low &&
sd->exposure > exposure_ctrl->qctrl.minimum) {
gain = gain_low;
sd->exp_too_high_cnt++;
} else {
gain += steps;
if (gain > gain_ctrl->qctrl.maximum)
gain = gain_ctrl->qctrl.maximum;
else if (gain < gain_ctrl->qctrl.minimum)
gain = gain_ctrl->qctrl.minimum;
sd->exp_too_high_cnt = 0;
sd->exp_too_low_cnt = 0;
}

if (sd->exp_too_high_cnt > 3) {
exposure--;
sd->exp_too_high_cnt = 0;
} else if (sd->exp_too_low_cnt > 3) {
exposure++;
sd->exp_too_low_cnt = 0;
}

if (gain != orig_gain) {
gain_ctrl->set(gspca_dev, gain);
retval = 1;
}
if (exposure != orig_exposure) {
exposure_ctrl->set(gspca_dev, exposure);
retval = 1;
}

return retval;
}
@@ -609,7 +609,7 @@ static const struct v4l2_pix_format ovfx2_ov3610_mode[] = {
* buffers, there are some pretty strict real time constraints for
* isochronous transfer for larger frame sizes).
*/
/*jfm: this value works well for 1600x1200, but not 800x600 - see isoc_init */
/*jfm: this value does not work for 800x600 - see isoc_init */
#define OVFX2_BULK_SIZE (13 * 4096)

/* I2C registers */

@@ -3307,6 +3307,7 @@ static int sd_config(struct gspca_dev *gspca_dev,

gspca_dev->cam.ctrls = sd->ctrls;
sd->quality = QUALITY_DEF;
sd->frame_rate = 15;

return 0;
}

@@ -3469,7 +3470,6 @@ static int sd_init(struct gspca_dev *gspca_dev)
ARRAY_SIZE(init_519_ov7660));
write_i2c_regvals(sd, norm_7660, ARRAY_SIZE(norm_7660));
sd->gspca_dev.curr_mode = 1; /* 640x480 */
sd->frame_rate = 15;
ov519_set_mode(sd);
ov519_set_fr(sd);
sd->ctrls[COLORS].max = 4; /* 0..4 */

@@ -3511,7 +3511,7 @@ static int sd_isoc_init(struct gspca_dev *gspca_dev)

switch (sd->bridge) {
case BRIDGE_OVFX2:
if (gspca_dev->width == 1600)
if (gspca_dev->width != 800)
gspca_dev->cam.bulk_size = OVFX2_BULK_SIZE;
else
gspca_dev->cam.bulk_size = 7 * 4096;

@@ -4478,7 +4478,7 @@ static void ovfx2_pkt_scan(struct gspca_dev *gspca_dev,
gspca_frame_add(gspca_dev, INTER_PACKET, data, len);

/* A short read signals EOF */
if (len < OVFX2_BULK_SIZE) {
if (len < gspca_dev->cam.bulk_size) {
/* If the frame is short, and it is one of the first ones
the sensor and bridge are still syncing, so drop it. */
if (sd->first_frame) {
@@ -60,7 +60,7 @@ struct sd {

u32 pktsz; /* (used by pkt_scan) */
u16 npkt;
u8 nchg;
s8 nchg;
s8 short_mark;

u8 quality; /* image quality */
@@ -125,7 +125,7 @@
#define HDCS_SLEEP_MODE (1 << 1)

#define HDCS_DEFAULT_EXPOSURE 48
#define HDCS_DEFAULT_GAIN 128
#define HDCS_DEFAULT_GAIN 50

static int hdcs_probe_1x00(struct sd *sd);
static int hdcs_probe_1020(struct sd *sd);
@@ -1328,6 +1328,8 @@ int ivtv_init_on_first_open(struct ivtv *itv)
if (!itv->has_cx23415)
write_reg_sync(0x03, IVTV_REG_DMACONTROL);

ivtv_s_std_enc(itv, &itv->tuner_std);

/* Default interrupts enabled. For the PVR350 this includes the
decoder VSYNC interrupt, which is always on. It is not only used
during decoding but also by the OSD.

@@ -1336,12 +1338,10 @@ int ivtv_init_on_first_open(struct ivtv *itv)
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT | IVTV_IRQ_DEC_VSYNC);
ivtv_set_osd_alpha(itv);
}
else
ivtv_s_std_dec(itv, &itv->tuner_std);
} else {
ivtv_clear_irq_mask(itv, IVTV_IRQ_MASK_INIT);

/* For cards with video out, this call needs interrupts enabled */
ivtv_s_std(NULL, &fh, &itv->tuner_std);
}

/* Setup initial controls */
cx2341x_handler_setup(&itv->cxhdl);
@@ -280,8 +280,6 @@ int ivtv_firmware_restart(struct ivtv *itv)
{
int rc = 0;
v4l2_std_id std;
struct ivtv_open_id fh;
fh.itv = itv;

if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
/* Display test image during restart */

@@ -301,14 +299,19 @@ int ivtv_firmware_restart(struct ivtv *itv)
/* Allow settings to reload */
ivtv_mailbox_cache_invalidate(itv);

/* Restore video standard */
/* Restore encoder video standard */
std = itv->std;
itv->std = 0;
ivtv_s_std(NULL, &fh, &std);
ivtv_s_std_enc(itv, &std);

if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
ivtv_init_mpeg_decoder(itv);

/* Restore decoder video standard */
std = itv->std_out;
itv->std_out = 0;
ivtv_s_std_dec(itv, &std);

/* Restore framebuffer if active */
if (itv->ivtvfb_restore)
itv->ivtvfb_restore(itv);
@@ -1071,28 +1071,8 @@ static int ivtv_g_std(struct file *file, void *fh, v4l2_std_id *std)
return 0;
}

int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id *std)
{
DEFINE_WAIT(wait);
struct ivtv *itv = fh2id(fh)->itv;
struct yuv_playback_info *yi = &itv->yuv_info;
int f;

if ((*std & V4L2_STD_ALL) == 0)
return -EINVAL;

if (*std == itv->std)
return 0;

if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ||
atomic_read(&itv->capturing) > 0 ||
atomic_read(&itv->decoding) > 0) {
/* Switching standard would turn off the radio or mess
with already running streams, prevent that by
returning EBUSY. */
return -EBUSY;
}

itv->std = *std;
itv->is_60hz = (*std & V4L2_STD_525_60) ? 1 : 0;
itv->is_50hz = !itv->is_60hz;

@@ -1106,48 +1086,79 @@ int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
if (itv->hw_flags & IVTV_HW_CX25840)
itv->vbi.sliced_decoder_line_size = itv->is_60hz ? 272 : 284;

IVTV_DEBUG_INFO("Switching standard to %llx.\n", (unsigned long long)itv->std);

/* Tuner */
ivtv_call_all(itv, core, s_std, itv->std);
}

if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT) {
/* set display standard */
itv->std_out = *std;
itv->is_out_60hz = itv->is_60hz;
itv->is_out_50hz = itv->is_50hz;
ivtv_call_all(itv, video, s_std_output, itv->std_out);
void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
{
struct yuv_playback_info *yi = &itv->yuv_info;
DEFINE_WAIT(wait);
int f;

/*
* The next firmware call is time sensitive. Time it to
* avoid risk of a hard lock, by trying to ensure the call
* happens within the first 100 lines of the top field.
* Make 4 attempts to sync to the decoder before giving up.
*/
for (f = 0; f < 4; f++) {
prepare_to_wait(&itv->vsync_waitq, &wait,
TASK_UNINTERRUPTIBLE);
if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
break;
schedule_timeout(msecs_to_jiffies(25));
}
finish_wait(&itv->vsync_waitq, &wait);
/* set display standard */
itv->std_out = *std;
itv->is_out_60hz = (*std & V4L2_STD_525_60) ? 1 : 0;
itv->is_out_50hz = !itv->is_out_60hz;
ivtv_call_all(itv, video, s_std_output, itv->std_out);

if (f == 4)
IVTV_WARN("Mode change failed to sync to decoder\n");

ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
itv->main_rect.left = itv->main_rect.top = 0;
itv->main_rect.width = 720;
itv->main_rect.height = itv->cxhdl.height;
ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
720, itv->main_rect.height, 0, 0);
yi->main_rect = itv->main_rect;
if (!itv->osd_info) {
yi->osd_full_w = 720;
yi->osd_full_h = itv->is_out_50hz ? 576 : 480;
}
/*
* The next firmware call is time sensitive. Time it to
* avoid risk of a hard lock, by trying to ensure the call
* happens within the first 100 lines of the top field.
* Make 4 attempts to sync to the decoder before giving up.
*/
for (f = 0; f < 4; f++) {
prepare_to_wait(&itv->vsync_waitq, &wait,
TASK_UNINTERRUPTIBLE);
if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
break;
schedule_timeout(msecs_to_jiffies(25));
}
finish_wait(&itv->vsync_waitq, &wait);

if (f == 4)
IVTV_WARN("Mode change failed to sync to decoder\n");

ivtv_vapi(itv, CX2341X_DEC_SET_STANDARD, 1, itv->is_out_50hz);
itv->main_rect.left = 0;
itv->main_rect.top = 0;
itv->main_rect.width = 720;
itv->main_rect.height = itv->is_out_50hz ? 576 : 480;
ivtv_vapi(itv, CX2341X_OSD_SET_FRAMEBUFFER_WINDOW, 4,
720, itv->main_rect.height, 0, 0);
yi->main_rect = itv->main_rect;
if (!itv->osd_info) {
yi->osd_full_w = 720;
yi->osd_full_h = itv->is_out_50hz ? 576 : 480;
}
}

int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std)
{
struct ivtv *itv = fh2id(fh)->itv;

if ((*std & V4L2_STD_ALL) == 0)
return -EINVAL;

if (*std == itv->std)
return 0;

if (test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags) ||
atomic_read(&itv->capturing) > 0 ||
atomic_read(&itv->decoding) > 0) {
/* Switching standard would mess with already running
streams, prevent that by returning EBUSY. */
return -EBUSY;
}

IVTV_DEBUG_INFO("Switching standard to %llx.\n",
(unsigned long long)itv->std);

ivtv_s_std_enc(itv, std);
if (itv->v4l2_cap & V4L2_CAP_VIDEO_OUTPUT)
ivtv_s_std_dec(itv, std);

return 0;
}
@@ -27,7 +27,8 @@ u16 ivtv_get_service_set(struct v4l2_sliced_vbi_format *fmt);
void ivtv_set_osd_alpha(struct ivtv *itv);
int ivtv_set_speed(struct ivtv *itv, int speed);
void ivtv_set_funcs(struct video_device *vdev);
int ivtv_s_std(struct file *file, void *fh, v4l2_std_id *std);
void ivtv_s_std_enc(struct ivtv *itv, v4l2_std_id *std);
void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std);
int ivtv_s_frequency(struct file *file, void *fh, struct v4l2_frequency *vf);
int ivtv_s_input(struct file *file, void *fh, unsigned int inp);
long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
@@ -589,7 +589,7 @@ int ivtv_start_v4l2_encode_stream(struct ivtv_stream *s)
v4l2_subdev_call(itv->sd_audio, audio, s_stream, 1);
/* Avoid unpredictable PCI bus hang - disable video clocks */
v4l2_subdev_call(itv->sd_video, video, s_stream, 0);
ivtv_msleep_timeout(300, 1);
ivtv_msleep_timeout(300, 0);
ivtv_vapi(itv, CX2341X_ENC_INITIALIZE_INPUT, 0);
v4l2_subdev_call(itv->sd_video, video, s_stream, 1);
}

@@ -834,7 +834,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
}

/* Handle any pending interrupts */
ivtv_msleep_timeout(100, 1);
ivtv_msleep_timeout(100, 0);
}

atomic_dec(&itv->capturing);