Merge branch 'linus' into percpu-cpumask-x86-for-linus-2

Conflicts:
	arch/sparc/kernel/time_64.c
	drivers/gpu/drm/drm_proc.c

Manual merge to resolve a build warning caused by the phys_addr_t
type change on x86:

	drivers/gpu/drm/drm_info.c

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Ingo Molnar, 2009-03-28 04:21:18 +01:00
Parents: 6e15cf0486 5d80f8e5a9
Commit: 82268da1b1
408 changed files: 18152 additions and 6418 deletions
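For a sense of what such a fix-up looks like, here is a minimal,
self-contained sketch of this warning class and the usual cast-based
fix. The type alias, names, and value are illustrative, not taken from
drm_info.c:

#include <stdio.h>
#include <stdint.h>

/* Stand-in for the kernel's phys_addr_t, which is 64-bit even on
 * 32-bit x86 when CONFIG_PHYS_ADDR_T_64BIT is set. */
typedef uint64_t phys_addr_t;

int main(void)
{
        phys_addr_t offset = 0xfee00000;        /* illustrative value */

        /* A format such as "%08lx" warns once phys_addr_t is wider
         * than unsigned long; the portable fix is an explicit cast. */
        printf("offset = 0x%08llx\n", (unsigned long long)offset);
        return 0;
}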


@ -227,6 +227,12 @@ usage should require reading the full document.
!Pinclude/net/mac80211.h Powersave support
</chapter>
<chapter id="beacon-filter">
<title>Beacon filter support</title>
!Pinclude/net/mac80211.h Beacon filter support
!Finclude/net/mac80211.h ieee80211_beacon_loss
</chapter>
<chapter id="qos">
<title>Multiple queues and QoS support</title>
<para>TBD</para>


@ -6,20 +6,47 @@ be removed from this file.
---------------------------
What: old static regulatory information and ieee80211_regdom module parameter
When: 2.6.29
What: The ieee80211_regdom module parameter
When: March 2010 / desktop catchup
Why: This was inherited by the CONFIG_WIRELESS_OLD_REGULATORY code,
and currently serves as an option for users to define an
ISO 3166-1 alpha-2 code for the country they are currently
present in. Although there are userspace API replacements for this
through nl80211, distributions haven't yet caught up with implementing
decent alternatives through standard GUIs. Although it is available as an
option through iw or wpa_supplicant, it's just a matter of time before
distributions pick up good GUI options for this. The ideal solution
would actually consist of intelligent designs which would do this for
the user automatically even when travelling through different countries.
Until then we leave this module parameter as a compromise.
When userspace improves with reasonable, widely available alternatives for
this we will no longer need this module parameter. This entry hopes that
by the super-futuristic-looking date of "March 2010" we will have
such replacements widely available.
Who: Luis R. Rodriguez <lrodriguez@atheros.com>
---------------------------
What: CONFIG_WIRELESS_OLD_REGULATORY - old static regulatory information
When: March 2010 / desktop catchup
Why: The old regulatory infrastructure has been replaced with a new one
which does not require statically defined regulatory domains. We do
not want to keep static regulatory domains in the kernel due to
the dynamic nature of regulatory law and localization. We kept around
the old static definitions for the regulatory domains of:
* US
* JP
* EU
and used by default the US when CONFIG_WIRELESS_OLD_REGULATORY was
set. We also kept around the ieee80211_regdom module parameter in case
some applications were relying on it. Changing regulatory domains
can now be done instead by using nl80211, as is done with iw.
set. We will remove this option once the standard Linux desktop catches
up with the new userspace APIs we have implemented.
Who: Luis R. Rodriguez <lrodriguez@atheros.com>
---------------------------


@ -765,6 +765,14 @@ L: linux-wireless@vger.kernel.org
L: ath9k-devel@lists.ath9k.org
S: Supported
ATHEROS AR9170 WIRELESS DRIVER
P: Christian Lamparter
M: chunkeey@web.de
L: linux-wireless@vger.kernel.org
W: http://wireless.kernel.org/en/users/Drivers/ar9170
S: Maintained
F: drivers/net/wireless/ar9170/
ATI_REMOTE2 DRIVER
P: Ville Syrjala
M: syrjala@sci.fi
@ -3602,7 +3610,7 @@ S: Maintained
RALINK RT2X00 WIRELESS LAN DRIVER
P: rt2x00 project
L: linux-wireless@vger.kernel.org
L: rt2400-devel@lists.sourceforge.net
L: users@rt2x00.serialmonkey.com
W: http://rt2x00.serialmonkey.com/
S: Maintained
T: git kernel.org:/pub/scm/linux/kernel/git/ivd/rt2x00.git


@ -903,8 +903,9 @@ sys_alpha_pipe:
stq $26, 0($sp)
.prologue 0
mov $31, $17
lda $16, 8($sp)
jsr $26, do_pipe
jsr $26, do_pipe_flags
ldq $26, 0($sp)
bne $0, 1f


@ -46,8 +46,6 @@
#include <asm/hwrpb.h>
#include <asm/processor.h>
extern int do_pipe(int *);
/*
* Brk needs to return an error. Still support Linux's brk(0) query idiom,
* which OSF programs just shouldn't be doing. We're still not quite


@ -240,7 +240,7 @@ ia32_syscall_table:
data8 sys_ni_syscall
data8 sys_umask /* 60 */
data8 sys_chroot
data8 sys_ustat
data8 compat_sys_ustat
data8 sys_dup2
data8 sys_getppid
data8 sys_getpgrp /* 65 */


@ -2196,7 +2196,7 @@ pfmfs_delete_dentry(struct dentry *dentry)
return 1;
}
static struct dentry_operations pfmfs_dentry_operations = {
static const struct dentry_operations pfmfs_dentry_operations = {
.d_delete = pfmfs_delete_dentry,
};


@ -355,40 +355,6 @@ SYSCALL_DEFINE1(32_personality, unsigned long, personality)
return ret;
}
/* ustat compatibility */
struct ustat32 {
compat_daddr_t f_tfree;
compat_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
};
extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
SYSCALL_DEFINE2(32_ustat, dev_t, dev, struct ustat32 __user *, ubuf32)
{
int err;
struct ustat tmp;
struct ustat32 tmp32;
mm_segment_t old_fs = get_fs();
set_fs(KERNEL_DS);
err = sys_ustat(dev, (struct ustat __user *)&tmp);
set_fs(old_fs);
if (err)
goto out;
memset(&tmp32, 0, sizeof(struct ustat32));
tmp32.f_tfree = tmp.f_tfree;
tmp32.f_tinode = tmp.f_tinode;
err = copy_to_user(ubuf32, &tmp32, sizeof(struct ustat32)) ? -EFAULT : 0;
out:
return err;
}
SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd,
compat_off_t __user *, offset, s32, count)
{


@ -253,7 +253,7 @@ EXPORT(sysn32_call_table)
PTR compat_sys_utime /* 6130 */
PTR sys_mknod
PTR sys_32_personality
PTR sys_32_ustat
PTR compat_sys_ustat
PTR compat_sys_statfs
PTR compat_sys_fstatfs /* 6135 */
PTR sys_sysfs


@ -265,7 +265,7 @@ sys_call_table:
PTR sys_olduname
PTR sys_umask /* 4060 */
PTR sys_chroot
PTR sys_32_ustat
PTR compat_sys_ustat
PTR sys_dup2
PTR sys_getppid
PTR sys_getpgrp /* 4065 */


@ -130,7 +130,7 @@
ENTRY_OURS(newuname)
ENTRY_SAME(umask) /* 60 */
ENTRY_SAME(chroot)
ENTRY_SAME(ustat)
ENTRY_COMP(ustat)
ENTRY_SAME(dup2)
ENTRY_SAME(getppid)
ENTRY_SAME(getpgrp) /* 65 */


@ -65,7 +65,7 @@ SYSCALL(ni_syscall)
SYSX(sys_ni_syscall,sys_olduname, sys_olduname)
COMPAT_SYS_SPU(umask)
SYSCALL_SPU(chroot)
SYSCALL(ustat)
COMPAT_SYS(ustat)
SYSCALL_SPU(dup2)
SYSCALL_SPU(getppid)
SYSCALL_SPU(getpgrp)


@ -252,7 +252,7 @@ sys32_chroot_wrapper:
sys32_ustat_wrapper:
llgfr %r2,%r2 # dev_t
llgtr %r3,%r3 # struct ustat *
jg sys_ustat
jg compat_sys_ustat
.globl sys32_dup2_wrapper
sys32_dup2_wrapper:


@ -1031,7 +1031,7 @@ void smp_fetch_global_regs(void)
* If the address space is non-shared (ie. mm->count == 1) we avoid
* cross calls when we want to flush the currently running process's
* tlb state. This is done by clearing all cpu bits except the current
* processor's in current->active_mm->cpu_vm_mask and performing the
* processor's in current->mm->cpu_vm_mask and performing the
* flush locally only. This will force any subsequent cpus which run
* this task to flush the context from the local tlb if the process
* migrates to another cpu (again).
@ -1074,7 +1074,7 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
u32 ctx = CTX_HWBITS(mm->context);
int cpu = get_cpu();
if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
mm->cpu_vm_mask = cpumask_of_cpu(cpu);
else
smp_cross_call_masked(&xcall_flush_tlb_pending,


@ -51,7 +51,7 @@ sys_call_table32:
/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
.word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall
.word sys_quotactl, sys_set_tid_address, compat_sys_mount, sys_ustat, sys32_setxattr
.word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys32_setxattr
/*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents
.word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr
/*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall


@ -36,10 +36,10 @@
#include <linux/clocksource.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <asm/oplib.h>
#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/starfire.h>


@ -86,7 +86,7 @@ static int uml_net_rx(struct net_device *dev)
drop_skb->dev = dev;
/* Read a packet into drop_skb and don't do anything with it. */
(*lp->read)(lp->fd, drop_skb, lp);
lp->stats.rx_dropped++;
dev->stats.rx_dropped++;
return 0;
}
@ -99,8 +99,8 @@ static int uml_net_rx(struct net_device *dev)
skb_trim(skb, pkt_len);
skb->protocol = (*lp->protocol)(skb);
lp->stats.rx_bytes += skb->len;
lp->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
netif_rx(skb);
return pkt_len;
}
@ -224,8 +224,8 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
len = (*lp->write)(lp->fd, skb, lp);
if (len == skb->len) {
lp->stats.tx_packets++;
lp->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
dev->trans_start = jiffies;
netif_start_queue(dev);
@ -234,7 +234,7 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
else if (len == 0) {
netif_start_queue(dev);
lp->stats.tx_dropped++;
dev->stats.tx_dropped++;
}
else {
netif_start_queue(dev);
@ -248,12 +248,6 @@ static int uml_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
return 0;
}
static struct net_device_stats *uml_net_get_stats(struct net_device *dev)
{
struct uml_net_private *lp = netdev_priv(dev);
return &lp->stats;
}
static void uml_net_set_multicast_list(struct net_device *dev)
{
return;
@ -377,6 +371,18 @@ static void net_device_release(struct device *dev)
free_netdev(netdev);
}
static const struct net_device_ops uml_netdev_ops = {
.ndo_open = uml_net_open,
.ndo_stop = uml_net_close,
.ndo_start_xmit = uml_net_start_xmit,
.ndo_set_multicast_list = uml_net_set_multicast_list,
.ndo_tx_timeout = uml_net_tx_timeout,
.ndo_set_mac_address = uml_net_set_mac,
.ndo_change_mtu = uml_net_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
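/* duplicate initializer: this eth_mac_addr entry silently overrides
 * the uml_net_set_mac entry above (the later initializer wins) */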
.ndo_validate_addr = eth_validate_addr,
};
/*
* Ensures that platform_driver_register is called only once by
* eth_configure. Will be set in an initcall.
@ -473,14 +479,7 @@ static void eth_configure(int n, void *init, char *mac,
set_ether_mac(dev, device->mac);
dev->mtu = transport->user->mtu;
dev->open = uml_net_open;
dev->hard_start_xmit = uml_net_start_xmit;
dev->stop = uml_net_close;
dev->get_stats = uml_net_get_stats;
dev->set_multicast_list = uml_net_set_multicast_list;
dev->tx_timeout = uml_net_tx_timeout;
dev->set_mac_address = uml_net_set_mac;
dev->change_mtu = uml_net_change_mtu;
dev->netdev_ops = &uml_netdev_ops;
dev->ethtool_ops = &uml_net_ethtool_ops;
dev->watchdog_timeo = (HZ >> 1);
dev->irq = UM_ETH_IRQ;


@ -26,7 +26,7 @@ struct uml_net_private {
spinlock_t lock;
struct net_device *dev;
struct timer_list tl;
struct net_device_stats stats;
struct work_struct work;
int fd;
unsigned char mac[ETH_ALEN];


@ -557,7 +557,7 @@ ia32_sys_call_table:
.quad sys32_olduname
.quad sys_umask /* 60 */
.quad sys_chroot
.quad sys32_ustat
.quad compat_sys_ustat
.quad sys_dup2
.quad sys_getppid
.quad sys_getpgrp /* 65 */


@ -638,28 +638,6 @@ long sys32_uname(struct old_utsname __user *name)
return err ? -EFAULT : 0;
}
long sys32_ustat(unsigned dev, struct ustat32 __user *u32p)
{
struct ustat u;
mm_segment_t seg;
int ret;
seg = get_fs();
set_fs(KERNEL_DS);
ret = sys_ustat(dev, (struct ustat __user *)&u);
set_fs(seg);
if (ret < 0)
return ret;
if (!access_ok(VERIFY_WRITE, u32p, sizeof(struct ustat32)) ||
__put_user((__u32) u.f_tfree, &u32p->f_tfree) ||
__put_user((__u32) u.f_tinode, &u32p->f_tfree) ||
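/* note the copy/paste bug above: f_tinode is stored into f_tfree,
 * a defect this removal in favor of compat_sys_ustat gets rid of */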
__copy_to_user(&u32p->f_fname, u.f_fname, sizeof(u.f_fname)) ||
__copy_to_user(&u32p->f_fpack, u.f_fpack, sizeof(u.f_fpack)))
ret = -EFAULT;
return ret;
}
asmlinkage long sys32_execve(char __user *name, compat_uptr_t __user *argv,
compat_uptr_t __user *envp, struct pt_regs *regs)
{


@ -129,13 +129,6 @@ typedef struct compat_siginfo {
} _sifields;
} compat_siginfo_t;
struct ustat32 {
__u32 f_tfree;
compat_ino_t f_tinode;
char f_fname[6];
char f_fpack[6];
};
#define IA32_STACK_TOP IA32_PAGE_OFFSET
#ifdef __KERNEL__


@ -70,8 +70,6 @@ struct old_utsname;
asmlinkage long sys32_olduname(struct oldold_utsname __user *);
long sys32_uname(struct old_utsname __user *);
long sys32_ustat(unsigned, struct ustat32 __user *);
asmlinkage long sys32_execve(char __user *, compat_uptr_t __user *,
compat_uptr_t __user *, struct pt_regs *);
asmlinkage long sys32_clone(unsigned int, unsigned int, struct pt_regs *);


@ -26,6 +26,10 @@
#define PCI_DEVICE_ID_INTEL_82965GME_IG 0x2A12
#define PCI_DEVICE_ID_INTEL_82945GME_HB 0x27AC
#define PCI_DEVICE_ID_INTEL_82945GME_IG 0x27AE
#define PCI_DEVICE_ID_INTEL_IGDGM_HB 0xA010
#define PCI_DEVICE_ID_INTEL_IGDGM_IG 0xA011
#define PCI_DEVICE_ID_INTEL_IGDG_HB 0xA000
#define PCI_DEVICE_ID_INTEL_IGDG_IG 0xA001
#define PCI_DEVICE_ID_INTEL_G33_HB 0x29C0
#define PCI_DEVICE_ID_INTEL_G33_IG 0x29C2
#define PCI_DEVICE_ID_INTEL_Q35_HB 0x29B0
@ -60,7 +64,12 @@
#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB)
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
@ -510,7 +519,7 @@ static void intel_i830_init_gtt_entries(void)
size = 512;
}
size += 4; /* add in BIOS popup space */
} else if (IS_G33) {
} else if (IS_G33 && !IS_IGD) {
/* G33's GTT size defined in gmch_ctrl */
switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
case G33_PGETBL_SIZE_1M:
@ -526,7 +535,7 @@ static void intel_i830_init_gtt_entries(void)
size = 512;
}
size += 4;
} else if (IS_G4X) {
} else if (IS_G4X || IS_IGD) {
/* On 4 series hardware, GTT stolen is separate from graphics
* stolen, ignore it in stolen gtt entries counting. However,
* 4KB of the stolen memory doesn't get mapped to the GTT.
@ -2161,6 +2170,10 @@ static const struct intel_driver_description {
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
NULL, &intel_g33_driver },
{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
"Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
{ PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
@ -2355,6 +2368,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
ID(PCI_DEVICE_ID_INTEL_82945G_HB),
ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
ID(PCI_DEVICE_ID_INTEL_82G35_HB),
ID(PCI_DEVICE_ID_INTEL_82965Q_HB),


@ -63,8 +63,7 @@ static int descriptor_count;
#define BIB_CMC ((1) << 30)
#define BIB_IMC ((1) << 31)
static u32 *
generate_config_rom(struct fw_card *card, size_t *config_rom_length)
static u32 *generate_config_rom(struct fw_card *card, size_t *config_rom_length)
{
struct fw_descriptor *desc;
static u32 config_rom[256];
@ -128,8 +127,7 @@ generate_config_rom(struct fw_card *card, size_t *config_rom_length)
return config_rom;
}
static void
update_config_roms(void)
static void update_config_roms(void)
{
struct fw_card *card;
u32 *config_rom;
@ -141,8 +139,7 @@ update_config_roms(void)
}
}
int
fw_core_add_descriptor(struct fw_descriptor *desc)
int fw_core_add_descriptor(struct fw_descriptor *desc)
{
size_t i;
@ -171,8 +168,7 @@ fw_core_add_descriptor(struct fw_descriptor *desc)
return 0;
}
void
fw_core_remove_descriptor(struct fw_descriptor *desc)
void fw_core_remove_descriptor(struct fw_descriptor *desc)
{
mutex_lock(&card_mutex);
@ -185,12 +181,30 @@ fw_core_remove_descriptor(struct fw_descriptor *desc)
mutex_unlock(&card_mutex);
}
static int set_broadcast_channel(struct device *dev, void *data)
{
fw_device_set_broadcast_channel(fw_device(dev), (long)data);
return 0;
}
static void allocate_broadcast_channel(struct fw_card *card, int generation)
{
int channel, bandwidth = 0;
fw_iso_resource_manage(card, generation, 1ULL << 31,
&channel, &bandwidth, true);
if (channel == 31) {
card->broadcast_channel_allocated = true;
device_for_each_child(card->device, (void *)(long)generation,
set_broadcast_channel);
}
}
static const char gap_count_table[] = {
63, 5, 7, 8, 10, 13, 16, 18, 21, 24, 26, 29, 32, 35, 37, 40
};
void
fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
{
int scheduled;
@ -200,37 +214,38 @@ fw_schedule_bm_work(struct fw_card *card, unsigned long delay)
fw_card_put(card);
}
static void
fw_card_bm_work(struct work_struct *work)
static void fw_card_bm_work(struct work_struct *work)
{
struct fw_card *card = container_of(work, struct fw_card, work.work);
struct fw_device *root_device;
struct fw_node *root_node, *local_node;
struct fw_node *root_node;
unsigned long flags;
int root_id, new_root_id, irm_id, gap_count, generation, grace, rcode;
int root_id, new_root_id, irm_id, local_id;
int gap_count, generation, grace, rcode;
bool do_reset = false;
bool root_device_is_running;
bool root_device_is_cmc;
__be32 lock_data[2];
spin_lock_irqsave(&card->lock, flags);
local_node = card->local_node;
root_node = card->root_node;
if (local_node == NULL) {
if (card->local_node == NULL) {
spin_unlock_irqrestore(&card->lock, flags);
goto out_put_card;
}
fw_node_get(local_node);
fw_node_get(root_node);
generation = card->generation;
root_node = card->root_node;
fw_node_get(root_node);
root_device = root_node->data;
root_device_is_running = root_device &&
atomic_read(&root_device->state) == FW_DEVICE_RUNNING;
root_device_is_cmc = root_device && root_device->cmc;
root_id = root_node->node_id;
grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 10));
root_id = root_node->node_id;
irm_id = card->irm_node->node_id;
local_id = card->local_node->node_id;
grace = time_after(jiffies, card->reset_jiffies + DIV_ROUND_UP(HZ, 8));
if (is_next_generation(generation, card->bm_generation) ||
(card->bm_generation != generation && grace)) {
@ -246,16 +261,15 @@ fw_card_bm_work(struct work_struct *work)
* next generation.
*/
irm_id = card->irm_node->node_id;
if (!card->irm_node->link_on) {
new_root_id = local_node->node_id;
new_root_id = local_id;
fw_notify("IRM has link off, making local node (%02x) root.\n",
new_root_id);
goto pick_me;
}
lock_data[0] = cpu_to_be32(0x3f);
lock_data[1] = cpu_to_be32(local_node->node_id);
lock_data[1] = cpu_to_be32(local_id);
spin_unlock_irqrestore(&card->lock, flags);
@ -269,9 +283,14 @@ fw_card_bm_work(struct work_struct *work)
goto out;
if (rcode == RCODE_COMPLETE &&
lock_data[0] != cpu_to_be32(0x3f))
/* Somebody else is BM, let them do the work. */
lock_data[0] != cpu_to_be32(0x3f)) {
/* Somebody else is BM. Only act as IRM. */
if (local_id == irm_id)
allocate_broadcast_channel(card, generation);
goto out;
}
spin_lock_irqsave(&card->lock, flags);
@ -282,19 +301,18 @@ fw_card_bm_work(struct work_struct *work)
* do a bus reset and pick the local node as
* root, and thus, IRM.
*/
new_root_id = local_node->node_id;
new_root_id = local_id;
fw_notify("BM lock failed, making local node (%02x) root.\n",
new_root_id);
goto pick_me;
}
} else if (card->bm_generation != generation) {
/*
* OK, we weren't BM in the last generation, and it's
* less than 100ms since last bus reset. Reschedule
* this task 100ms from now.
* We weren't BM in the last generation, and the last
* bus reset is less than 125ms ago. Reschedule this job.
*/
spin_unlock_irqrestore(&card->lock, flags);
fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 10));
fw_schedule_bm_work(card, DIV_ROUND_UP(HZ, 8));
goto out;
}
@ -310,7 +328,7 @@ fw_card_bm_work(struct work_struct *work)
* Either link_on is false, or we failed to read the
* config rom. In either case, pick another root.
*/
new_root_id = local_node->node_id;
new_root_id = local_id;
} else if (!root_device_is_running) {
/*
* If we haven't probed this device yet, bail out now
@ -332,7 +350,7 @@ fw_card_bm_work(struct work_struct *work)
* successfully read the config rom, but it's not
* cycle master capable.
*/
new_root_id = local_node->node_id;
new_root_id = local_id;
}
pick_me:
@ -363,25 +381,28 @@ fw_card_bm_work(struct work_struct *work)
card->index, new_root_id, gap_count);
fw_send_phy_config(card, new_root_id, generation, gap_count);
fw_core_initiate_bus_reset(card, 1);
/* Will allocate broadcast channel after the reset. */
} else {
if (local_id == irm_id)
allocate_broadcast_channel(card, generation);
}
out:
fw_node_put(root_node);
fw_node_put(local_node);
out_put_card:
fw_card_put(card);
}
static void
flush_timer_callback(unsigned long data)
static void flush_timer_callback(unsigned long data)
{
struct fw_card *card = (struct fw_card *)data;
fw_flush_transactions(card);
}
void
fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
struct device *device)
void fw_card_initialize(struct fw_card *card,
const struct fw_card_driver *driver,
struct device *device)
{
static atomic_t index = ATOMIC_INIT(-1);
@ -406,13 +427,12 @@ fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
}
EXPORT_SYMBOL(fw_card_initialize);
int
fw_card_add(struct fw_card *card,
u32 max_receive, u32 link_speed, u64 guid)
int fw_card_add(struct fw_card *card,
u32 max_receive, u32 link_speed, u64 guid)
{
u32 *config_rom;
size_t length;
int err;
int ret;
card->max_receive = max_receive;
card->link_speed = link_speed;
@ -423,13 +443,14 @@ fw_card_add(struct fw_card *card,
list_add_tail(&card->link, &card_list);
mutex_unlock(&card_mutex);
err = card->driver->enable(card, config_rom, length);
if (err < 0) {
ret = card->driver->enable(card, config_rom, length);
if (ret < 0) {
mutex_lock(&card_mutex);
list_del(&card->link);
mutex_unlock(&card_mutex);
}
return err;
return ret;
}
EXPORT_SYMBOL(fw_card_add);
@ -442,23 +463,20 @@ EXPORT_SYMBOL(fw_card_add);
* dummy driver just fails all IO.
*/
static int
dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
static int dummy_enable(struct fw_card *card, u32 *config_rom, size_t length)
{
BUG();
return -1;
}
static int
dummy_update_phy_reg(struct fw_card *card, int address,
int clear_bits, int set_bits)
static int dummy_update_phy_reg(struct fw_card *card, int address,
int clear_bits, int set_bits)
{
return -ENODEV;
}
static int
dummy_set_config_rom(struct fw_card *card,
u32 *config_rom, size_t length)
static int dummy_set_config_rom(struct fw_card *card,
u32 *config_rom, size_t length)
{
/*
* We take the card out of card_list before setting the dummy
@ -468,27 +486,23 @@ dummy_set_config_rom(struct fw_card *card,
return -1;
}
static void
dummy_send_request(struct fw_card *card, struct fw_packet *packet)
static void dummy_send_request(struct fw_card *card, struct fw_packet *packet)
{
packet->callback(packet, card, -ENODEV);
}
static void
dummy_send_response(struct fw_card *card, struct fw_packet *packet)
static void dummy_send_response(struct fw_card *card, struct fw_packet *packet)
{
packet->callback(packet, card, -ENODEV);
}
static int
dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
static int dummy_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
return -ENOENT;
}
static int
dummy_enable_phys_dma(struct fw_card *card,
int node_id, int generation)
static int dummy_enable_phys_dma(struct fw_card *card,
int node_id, int generation)
{
return -ENODEV;
}
@ -503,16 +517,14 @@ static struct fw_card_driver dummy_driver = {
.enable_phys_dma = dummy_enable_phys_dma,
};
void
fw_card_release(struct kref *kref)
void fw_card_release(struct kref *kref)
{
struct fw_card *card = container_of(kref, struct fw_card, kref);
complete(&card->done);
}
void
fw_core_remove_card(struct fw_card *card)
void fw_core_remove_card(struct fw_card *card)
{
card->driver->update_phy_reg(card, 4,
PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
@ -536,8 +548,7 @@ fw_core_remove_card(struct fw_card *card)
}
EXPORT_SYMBOL(fw_core_remove_card);
int
fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset)
{
int reg = short_reset ? 5 : 1;
int bit = short_reset ? PHY_BUS_SHORT_RESET : PHY_BUS_RESET;

Diff not shown because of the file's large size.


@ -18,22 +18,26 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/errno.h>
#include <linux/kthread.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/kobject.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <asm/system.h>
#include <linux/ctype.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"
#include "fw-topology.h"
#include "fw-transaction.h"
void fw_csr_iterator_init(struct fw_csr_iterator *ci, u32 * p)
{
@ -132,8 +136,7 @@ static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
vendor, model, specifier_id, version);
}
static int
fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
{
struct fw_unit *unit = fw_unit(dev);
char modalias[64];
@ -152,27 +155,6 @@ struct bus_type fw_bus_type = {
};
EXPORT_SYMBOL(fw_bus_type);
static void fw_device_release(struct device *dev)
{
struct fw_device *device = fw_device(dev);
struct fw_card *card = device->card;
unsigned long flags;
/*
* Take the card lock so we don't set this to NULL while a
* FW_NODE_UPDATED callback is being handled or while the
* bus manager work looks at this node.
*/
spin_lock_irqsave(&card->lock, flags);
device->node->data = NULL;
spin_unlock_irqrestore(&card->lock, flags);
fw_node_put(device->node);
kfree(device->config_rom);
kfree(device);
fw_card_put(card);
}
int fw_device_enable_phys_dma(struct fw_device *device)
{
int generation = device->generation;
@ -191,8 +173,8 @@ struct config_rom_attribute {
u32 key;
};
static ssize_t
show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
static ssize_t show_immediate(struct device *dev,
struct device_attribute *dattr, char *buf)
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
@ -223,8 +205,8 @@ show_immediate(struct device *dev, struct device_attribute *dattr, char *buf)
#define IMMEDIATE_ATTR(name, key) \
{ __ATTR(name, S_IRUGO, show_immediate, NULL), key }
static ssize_t
show_text_leaf(struct device *dev, struct device_attribute *dattr, char *buf)
static ssize_t show_text_leaf(struct device *dev,
struct device_attribute *dattr, char *buf)
{
struct config_rom_attribute *attr =
container_of(dattr, struct config_rom_attribute, attr);
@ -293,10 +275,9 @@ static struct config_rom_attribute config_rom_attributes[] = {
TEXT_LEAF_ATTR(hardware_version_name, CSR_HARDWARE_VERSION),
};
static void
init_fw_attribute_group(struct device *dev,
struct device_attribute *attrs,
struct fw_attribute_group *group)
static void init_fw_attribute_group(struct device *dev,
struct device_attribute *attrs,
struct fw_attribute_group *group)
{
struct device_attribute *attr;
int i, j;
@ -319,9 +300,8 @@ init_fw_attribute_group(struct device *dev,
dev->groups = group->groups;
}
static ssize_t
modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t modalias_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_unit *unit = fw_unit(dev);
int length;
@ -332,9 +312,8 @@ modalias_show(struct device *dev,
return length + 1;
}
static ssize_t
rom_index_show(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t rom_index_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev->parent);
struct fw_unit *unit = fw_unit(dev);
@ -349,8 +328,8 @@ static struct device_attribute fw_unit_attributes[] = {
__ATTR_NULL,
};
static ssize_t
config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t config_rom_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
size_t length;
@ -363,8 +342,8 @@ config_rom_show(struct device *dev, struct device_attribute *attr, char *buf)
return length;
}
static ssize_t
guid_show(struct device *dev, struct device_attribute *attr, char *buf)
static ssize_t guid_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fw_device *device = fw_device(dev);
int ret;
@ -383,8 +362,8 @@ static struct device_attribute fw_device_attributes[] = {
__ATTR_NULL,
};
static int
read_rom(struct fw_device *device, int generation, int index, u32 *data)
static int read_rom(struct fw_device *device,
int generation, int index, u32 *data)
{
int rcode;
@ -539,7 +518,7 @@ static int read_bus_info_block(struct fw_device *device, int generation)
kfree(old_rom);
ret = 0;
device->cmc = rom[2] & 1 << 30;
device->cmc = rom[2] >> 30 & 1;
out:
kfree(rom);
@ -679,11 +658,53 @@ static void fw_device_shutdown(struct work_struct *work)
fw_device_put(device);
}
static void fw_device_release(struct device *dev)
{
struct fw_device *device = fw_device(dev);
struct fw_card *card = device->card;
unsigned long flags;
/*
* Take the card lock so we don't set this to NULL while a
* FW_NODE_UPDATED callback is being handled or while the
* bus manager work looks at this node.
*/
spin_lock_irqsave(&card->lock, flags);
device->node->data = NULL;
spin_unlock_irqrestore(&card->lock, flags);
fw_node_put(device->node);
kfree(device->config_rom);
kfree(device);
fw_card_put(card);
}
static struct device_type fw_device_type = {
.release = fw_device_release,
.release = fw_device_release,
};
static void fw_device_update(struct work_struct *work);
static int update_unit(struct device *dev, void *data)
{
struct fw_unit *unit = fw_unit(dev);
struct fw_driver *driver = (struct fw_driver *)dev->driver;
if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
down(&dev->sem);
driver->update(unit);
up(&dev->sem);
}
return 0;
}
static void fw_device_update(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
fw_device_cdev_update(device);
device_for_each_child(&device->device, NULL, update_unit);
}
/*
* If a device was pending for deletion because its node went away but its
@ -735,12 +756,50 @@ static int lookup_existing_device(struct device *dev, void *data)
return match;
}
enum { BC_UNKNOWN = 0, BC_UNIMPLEMENTED, BC_IMPLEMENTED, };
void fw_device_set_broadcast_channel(struct fw_device *device, int generation)
{
struct fw_card *card = device->card;
__be32 data;
int rcode;
if (!card->broadcast_channel_allocated)
return;
if (device->bc_implemented == BC_UNKNOWN) {
rcode = fw_run_transaction(card, TCODE_READ_QUADLET_REQUEST,
device->node_id, generation, device->max_speed,
CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
&data, 4);
switch (rcode) {
case RCODE_COMPLETE:
if (data & cpu_to_be32(1 << 31)) {
device->bc_implemented = BC_IMPLEMENTED;
break;
}
/* else fall through to case address error */
case RCODE_ADDRESS_ERROR:
device->bc_implemented = BC_UNIMPLEMENTED;
}
}
if (device->bc_implemented == BC_IMPLEMENTED) {
data = cpu_to_be32(BROADCAST_CHANNEL_INITIAL |
BROADCAST_CHANNEL_VALID);
fw_run_transaction(card, TCODE_WRITE_QUADLET_REQUEST,
device->node_id, generation, device->max_speed,
CSR_REGISTER_BASE + CSR_BROADCAST_CHANNEL,
&data, 4);
}
}
static void fw_device_init(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
struct device *revived_dev;
int minor, err;
int minor, ret;
/*
* All failure paths here set node->data to NULL, so that we
@ -776,12 +835,12 @@ static void fw_device_init(struct work_struct *work)
fw_device_get(device);
down_write(&fw_device_rwsem);
err = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
ret = idr_pre_get(&fw_device_idr, GFP_KERNEL) ?
idr_get_new(&fw_device_idr, device, &minor) :
-ENOMEM;
up_write(&fw_device_rwsem);
if (err < 0)
if (ret < 0)
goto error;
device->device.bus = &fw_bus_type;
@ -828,6 +887,8 @@ static void fw_device_init(struct work_struct *work)
device->config_rom[3], device->config_rom[4],
1 << device->max_speed);
device->config_rom_retries = 0;
fw_device_set_broadcast_channel(device, device->generation);
}
/*
@ -851,29 +912,6 @@ static void fw_device_init(struct work_struct *work)
put_device(&device->device); /* our reference */
}
static int update_unit(struct device *dev, void *data)
{
struct fw_unit *unit = fw_unit(dev);
struct fw_driver *driver = (struct fw_driver *)dev->driver;
if (is_fw_unit(dev) && driver != NULL && driver->update != NULL) {
down(&dev->sem);
driver->update(unit);
up(&dev->sem);
}
return 0;
}
static void fw_device_update(struct work_struct *work)
{
struct fw_device *device =
container_of(work, struct fw_device, work.work);
fw_device_cdev_update(device);
device_for_each_child(&device->device, NULL, update_unit);
}
enum {
REREAD_BIB_ERROR,
REREAD_BIB_GONE,
@ -894,7 +932,7 @@ static int reread_bus_info_block(struct fw_device *device, int generation)
if (i == 0 && q == 0)
return REREAD_BIB_GONE;
if (i > device->config_rom_length || q != device->config_rom[i])
if (q != device->config_rom[i])
return REREAD_BIB_CHANGED;
}
@ -1004,6 +1042,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
device->node = fw_node_get(node);
device->node_id = node->node_id;
device->generation = card->generation;
mutex_init(&device->client_list_mutex);
INIT_LIST_HEAD(&device->client_list);
/*

Просмотреть файл

@ -19,10 +19,17 @@
#ifndef __fw_device_h
#define __fw_device_h
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <asm/atomic.h>
enum fw_device_state {
@ -38,6 +45,9 @@ struct fw_attribute_group {
struct attribute *attrs[11];
};
struct fw_node;
struct fw_card;
/*
* Note, fw_device.generation always has to be read before fw_device.node_id.
* Use SMP memory barriers to ensure this. Otherwise requests will be sent
@ -61,13 +71,18 @@ struct fw_device {
int node_id;
int generation;
unsigned max_speed;
bool cmc;
struct fw_card *card;
struct device device;
struct mutex client_list_mutex;
struct list_head client_list;
u32 *config_rom;
size_t config_rom_length;
int config_rom_retries;
unsigned cmc:1;
unsigned bc_implemented:2;
struct delayed_work work;
struct fw_attribute_group attribute_group;
};
@ -96,6 +111,7 @@ static inline void fw_device_put(struct fw_device *device)
struct fw_device *fw_device_get_by_devt(dev_t devt);
int fw_device_enable_phys_dma(struct fw_device *device);
void fw_device_set_broadcast_channel(struct fw_device *device, int generation);
void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
@ -176,8 +192,7 @@ struct fw_driver {
const struct fw_device_id *id_table;
};
static inline struct fw_driver *
fw_driver(struct device_driver *drv)
static inline struct fw_driver *fw_driver(struct device_driver *drv)
{
return container_of(drv, struct fw_driver, driver);
}


@ -1,5 +1,7 @@
/*
* Isochronous IO functionality
* Isochronous I/O functionality:
* - Isochronous DMA context management
* - Isochronous bus resource management (channels, bandwidth), client side
*
* Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
*
@ -18,21 +20,25 @@
* Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/firewire-constants.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "fw-transaction.h"
#include "fw-topology.h"
#include "fw-device.h"
#include "fw-transaction.h"
int
fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
int page_count, enum dma_data_direction direction)
/*
* Isochronous DMA context management
*/
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
int page_count, enum dma_data_direction direction)
{
int i, j, retval = -ENOMEM;
int i, j;
dma_addr_t address;
buffer->page_count = page_count;
@ -69,19 +75,21 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
kfree(buffer->pages);
out:
buffer->pages = NULL;
return retval;
return -ENOMEM;
}
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma)
{
unsigned long uaddr;
int i, retval;
int i, err;
uaddr = vma->vm_start;
for (i = 0; i < buffer->page_count; i++) {
retval = vm_insert_page(vma, uaddr, buffer->pages[i]);
if (retval)
return retval;
err = vm_insert_page(vma, uaddr, buffer->pages[i]);
if (err)
return err;
uaddr += PAGE_SIZE;
}
@ -105,14 +113,14 @@ void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
buffer->pages = NULL;
}
struct fw_iso_context *
fw_iso_context_create(struct fw_card *card, int type,
int channel, int speed, size_t header_size,
fw_iso_callback_t callback, void *callback_data)
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
int type, int channel, int speed, size_t header_size,
fw_iso_callback_t callback, void *callback_data)
{
struct fw_iso_context *ctx;
ctx = card->driver->allocate_iso_context(card, type, header_size);
ctx = card->driver->allocate_iso_context(card,
type, channel, header_size);
if (IS_ERR(ctx))
return ctx;
@ -134,25 +142,186 @@ void fw_iso_context_destroy(struct fw_iso_context *ctx)
card->driver->free_iso_context(ctx);
}
int
fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags)
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags)
{
return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
int
fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
struct fw_card *card = ctx->card;
return card->driver->queue_iso(ctx, packet, buffer, payload);
}
int
fw_iso_context_stop(struct fw_iso_context *ctx)
int fw_iso_context_stop(struct fw_iso_context *ctx)
{
return ctx->card->driver->stop_iso(ctx);
}
/*
* Isochronous bus resource management (channels, bandwidth), client side
*/
static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
int bandwidth, bool allocate)
{
__be32 data[2];
int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
/*
* On a 1394a IRM with low contention, try < 1 is enough.
* On a 1394-1995 IRM, we need at least try < 2.
* Let's just do try < 5.
*/
for (try = 0; try < 5; try++) {
new = allocate ? old - bandwidth : old + bandwidth;
if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
break;
data[0] = cpu_to_be32(old);
data[1] = cpu_to_be32(new);
switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
data, sizeof(data))) {
case RCODE_GENERATION:
/* A generation change frees all bandwidth. */
return allocate ? -EAGAIN : bandwidth;
case RCODE_COMPLETE:
if (be32_to_cpup(data) == old)
return bandwidth;
old = be32_to_cpup(data);
/* Fall through. */
}
}
return -EIO;
}
static int manage_channel(struct fw_card *card, int irm_id, int generation,
u32 channels_mask, u64 offset, bool allocate)
{
__be32 data[2], c, all, old;
int i, retry = 5;
old = all = allocate ? cpu_to_be32(~0) : 0;
for (i = 0; i < 32; i++) {
if (!(channels_mask & 1 << i))
continue;
c = cpu_to_be32(1 << (31 - i));
if ((old & c) != (all & c))
continue;
data[0] = old;
data[1] = old ^ c;
switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
irm_id, generation, SCODE_100,
offset, data, sizeof(data))) {
case RCODE_GENERATION:
/* A generation change frees all channels. */
return allocate ? -EAGAIN : i;
case RCODE_COMPLETE:
if (data[0] == old)
return i;
old = data[0];
/* Is the IRM 1394a-2000 compliant? */
if ((data[0] & c) == (data[1] & c))
continue;
/* 1394-1995 IRM, fall through to retry. */
default:
if (retry--)
i--;
}
}
return -EIO;
}
static void deallocate_channel(struct fw_card *card, int irm_id,
int generation, int channel)
{
u32 mask;
u64 offset;
mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;
manage_channel(card, irm_id, generation, mask, offset, false);
}
/**
* fw_iso_resource_manage - Allocate or deallocate a channel and/or bandwidth
*
* In parameters: card, generation, channels_mask, bandwidth, allocate
* Out parameters: channel, bandwidth
* This function blocks (sleeps) during communication with the IRM.
*
* Allocates or deallocates at most one channel out of channels_mask.
* channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
* (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
* channel 0 and LSB for channel 63.)
* Allocates or deallocates as many bandwidth allocation units as specified.
*
* Returns channel < 0 if no channel was allocated or deallocated.
* Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
*
* If generation is stale, deallocations succeed but allocations fail with
* channel = -EAGAIN.
*
* If channel allocation fails, no bandwidth will be allocated either.
* If bandwidth allocation fails, no channel will be allocated either.
* But deallocations of channel and bandwidth are tried independently
* of each other's success.
*/
void fw_iso_resource_manage(struct fw_card *card, int generation,
u64 channels_mask, int *channel, int *bandwidth,
bool allocate)
{
u32 channels_hi = channels_mask; /* channels 31...0 */
u32 channels_lo = channels_mask >> 32; /* channels 63...32 */
int irm_id, ret, c = -EINVAL;
spin_lock_irq(&card->lock);
irm_id = card->irm_node->node_id;
spin_unlock_irq(&card->lock);
if (channels_hi)
c = manage_channel(card, irm_id, generation, channels_hi,
CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI, allocate);
if (channels_lo && c < 0) {
c = manage_channel(card, irm_id, generation, channels_lo,
CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO, allocate);
if (c >= 0)
c += 32;
}
*channel = c;
if (allocate && channels_mask != 0 && c < 0)
*bandwidth = 0;
if (*bandwidth == 0)
return;
ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
if (ret < 0)
*bandwidth = 0;
if (allocate && ret < 0 && c >= 0) {
deallocate_channel(card, irm_id, generation, c);
*channel = ret;
}
}
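/*
 * A hedged usage sketch (not part of this commit): an illustrative
 * caller exercising the in/out contract documented above.  The
 * function name, channel mask, and bandwidth figure are invented.
 */
static void __maybe_unused example_iso_resource_alloc(struct fw_card *card,
                                                      int generation)
{
        int channel, bandwidth = 2048;  /* arbitrary allocation units */

        /* Try to allocate channel 7 or 9, plus the bandwidth above. */
        fw_iso_resource_manage(card, generation,
                               (1ULL << 7) | (1ULL << 9),
                               &channel, &bandwidth, true);
        if (channel < 0)
                return; /* no channel; bandwidth was zeroed as documented */

        /* Success: channel holds the allocated channel number and
         * bandwidth the number of units actually obtained. */
}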


@ -205,6 +205,7 @@ struct fw_ohci {
u32 it_context_mask;
struct iso_context *it_context_list;
u64 ir_context_channels;
u32 ir_context_mask;
struct iso_context *ir_context_list;
};
@ -441,9 +442,8 @@ static inline void flush_writes(const struct fw_ohci *ohci)
reg_read(ohci, OHCI1394_Version);
}
static int
ohci_update_phy_reg(struct fw_card *card, int addr,
int clear_bits, int set_bits)
static int ohci_update_phy_reg(struct fw_card *card, int addr,
int clear_bits, int set_bits)
{
struct fw_ohci *ohci = fw_ohci(card);
u32 val, old;
@ -658,8 +658,8 @@ static void ar_context_tasklet(unsigned long data)
}
}
static int
ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, u32 regs)
static int ar_context_init(struct ar_context *ctx,
struct fw_ohci *ohci, u32 regs)
{
struct ar_buffer ab;
@ -690,8 +690,7 @@ static void ar_context_run(struct ar_context *ctx)
flush_writes(ctx->ohci);
}
static struct descriptor *
find_branch_descriptor(struct descriptor *d, int z)
static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
int b, key;
@ -751,8 +750,7 @@ static void context_tasklet(unsigned long data)
* Allocate a new buffer and add it to the list of free buffers for this
* context. Must be called with ohci->lock held.
*/
static int
context_add_buffer(struct context *ctx)
static int context_add_buffer(struct context *ctx)
{
struct descriptor_buffer *desc;
dma_addr_t uninitialized_var(bus_addr);
@ -781,9 +779,8 @@ context_add_buffer(struct context *ctx)
return 0;
}
static int
context_init(struct context *ctx, struct fw_ohci *ohci,
u32 regs, descriptor_callback_t callback)
static int context_init(struct context *ctx, struct fw_ohci *ohci,
u32 regs, descriptor_callback_t callback)
{
ctx->ohci = ohci;
ctx->regs = regs;
@ -814,8 +811,7 @@ context_init(struct context *ctx, struct fw_ohci *ohci,
return 0;
}
static void
context_release(struct context *ctx)
static void context_release(struct context *ctx)
{
struct fw_card *card = &ctx->ohci->card;
struct descriptor_buffer *desc, *tmp;
@ -827,8 +823,8 @@ context_release(struct context *ctx)
}
/* Must be called with ohci->lock held */
static struct descriptor *
context_get_descriptors(struct context *ctx, int z, dma_addr_t *d_bus)
static struct descriptor *context_get_descriptors(struct context *ctx,
int z, dma_addr_t *d_bus)
{
struct descriptor *d = NULL;
struct descriptor_buffer *desc = ctx->buffer_tail;
@ -912,8 +908,8 @@ struct driver_data {
* Must always be called with the ochi->lock held to ensure proper
* generation handling and locking around packet queue manipulation.
*/
static int
at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
static int at_context_queue_packet(struct context *ctx,
struct fw_packet *packet)
{
struct fw_ohci *ohci = ctx->ohci;
dma_addr_t d_bus, uninitialized_var(payload_bus);
@ -940,7 +936,9 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
*/
header = (__le32 *) &d[1];
if (packet->header_length > 8) {
switch (packet->header_length) {
case 16:
case 12:
header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
(packet->speed << 16));
header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
@ -954,12 +952,27 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
header[3] = (__force __le32) packet->header[3];
d[0].req_count = cpu_to_le16(packet->header_length);
} else {
break;
case 8:
header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
(packet->speed << 16));
header[1] = cpu_to_le32(packet->header[0]);
header[2] = cpu_to_le32(packet->header[1]);
d[0].req_count = cpu_to_le16(12);
break;
case 4:
header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
(packet->speed << 16));
header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
d[0].req_count = cpu_to_le16(8);
break;
default:
/* BUG(); */
packet->ack = RCODE_SEND_ERROR;
return -1;
}
driver_data = (struct driver_data *) &d[3];
@ -1095,8 +1108,8 @@ static int handle_at_packet(struct context *context,
#define HEADER_GET_DATA_LENGTH(q) (((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q) (((q) >> 0) & 0xffff)
static void
handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
static void handle_local_rom(struct fw_ohci *ohci,
struct fw_packet *packet, u32 csr)
{
struct fw_packet response;
int tcode, length, i;
@ -1122,8 +1135,8 @@ handle_local_rom(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
fw_core_handle_response(&ohci->card, &response);
}
static void
handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
static void handle_local_lock(struct fw_ohci *ohci,
struct fw_packet *packet, u32 csr)
{
struct fw_packet response;
int tcode, length, ext_tcode, sel;
@ -1164,8 +1177,7 @@ handle_local_lock(struct fw_ohci *ohci, struct fw_packet *packet, u32 csr)
fw_core_handle_response(&ohci->card, &response);
}
static void
handle_local_request(struct context *ctx, struct fw_packet *packet)
static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
u64 offset;
u32 csr;
@ -1205,11 +1217,10 @@ handle_local_request(struct context *ctx, struct fw_packet *packet)
}
}
static void
at_context_transmit(struct context *ctx, struct fw_packet *packet)
static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
unsigned long flags;
int retval;
int ret;
spin_lock_irqsave(&ctx->ohci->lock, flags);
@ -1220,10 +1231,10 @@ at_context_transmit(struct context *ctx, struct fw_packet *packet)
return;
}
retval = at_context_queue_packet(ctx, packet);
ret = at_context_queue_packet(ctx, packet);
spin_unlock_irqrestore(&ctx->ohci->lock, flags);
if (retval < 0)
if (ret < 0)
packet->callback(packet, &ctx->ohci->card, packet->ack);
}
@ -1590,12 +1601,12 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
return 0;
}
static int
ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
static int ohci_set_config_rom(struct fw_card *card,
u32 *config_rom, size_t length)
{
struct fw_ohci *ohci;
unsigned long flags;
int retval = -EBUSY;
int ret = -EBUSY;
__be32 *next_config_rom;
dma_addr_t uninitialized_var(next_config_rom_bus);
@ -1649,7 +1660,7 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
reg_write(ohci, OHCI1394_ConfigROMmap,
ohci->next_config_rom_bus);
retval = 0;
ret = 0;
}
spin_unlock_irqrestore(&ohci->lock, flags);
@ -1661,13 +1672,13 @@ ohci_set_config_rom(struct fw_card *card, u32 *config_rom, size_t length)
* controller could need to access it before the bus reset
* takes effect.
*/
if (retval == 0)
if (ret == 0)
fw_core_initiate_bus_reset(&ohci->card, 1);
else
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
next_config_rom, next_config_rom_bus);
return retval;
return ret;
}
static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
@ -1689,7 +1700,7 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
struct fw_ohci *ohci = fw_ohci(card);
struct context *ctx = &ohci->at_request_ctx;
struct driver_data *driver_data = packet->driver_data;
int retval = -ENOENT;
int ret = -ENOENT;
tasklet_disable(&ctx->tasklet);
@ -1704,23 +1715,22 @@ static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
driver_data->packet = NULL;
packet->ack = RCODE_CANCELLED;
packet->callback(packet, &ohci->card, packet->ack);
retval = 0;
ret = 0;
out:
tasklet_enable(&ctx->tasklet);
return retval;
return ret;
}
static int
ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
static int ohci_enable_phys_dma(struct fw_card *card,
int node_id, int generation)
{
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
return 0;
#else
struct fw_ohci *ohci = fw_ohci(card);
unsigned long flags;
int n, retval = 0;
int n, ret = 0;
/*
* FIXME: Make sure this bitmask is cleared when we clear the busReset
@ -1730,7 +1740,7 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
spin_lock_irqsave(&ohci->lock, flags);
if (ohci->generation != generation) {
retval = -ESTALE;
ret = -ESTALE;
goto out;
}
@ -1748,12 +1758,12 @@ ohci_enable_phys_dma(struct fw_card *card, int node_id, int generation)
flush_writes(ohci);
out:
spin_unlock_irqrestore(&ohci->lock, flags);
return retval;
return ret;
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}
static u64
ohci_get_bus_time(struct fw_card *card)
static u64 ohci_get_bus_time(struct fw_card *card)
{
struct fw_ohci *ohci = fw_ohci(card);
u32 cycle_time;
@ -1765,6 +1775,28 @@ ohci_get_bus_time(struct fw_card *card)
return bus_time;
}
static void copy_iso_headers(struct iso_context *ctx, void *p)
{
int i = ctx->header_length;
if (i + ctx->base.header_size > PAGE_SIZE)
return;
/*
* The iso header is byteswapped to little endian by
* the controller, but the remaining header quadlets
* are big endian. We want to present all the headers
* as big endian, so we have to swap the first quadlet.
*/
if (ctx->base.header_size > 0)
*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
if (ctx->base.header_size > 4)
*(u32 *) (ctx->header + i + 4) = __swab32(*(u32 *) p);
if (ctx->base.header_size > 8)
memcpy(ctx->header + i + 8, p + 8, ctx->base.header_size - 8);
ctx->header_length += ctx->base.header_size;
}
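/*
 * Toy, self-contained illustration (not part of this commit) of the
 * quadlet fix-up above: the controller delivers the first iso-header
 * quadlet byteswapped to little endian, so presenting a uniformly
 * big-endian header means swapping only that first quadlet.  Plain
 * userspace C; __builtin_bswap32 stands in for the kernel's __swab32.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t as_delivered = 0x78563412;     /* LE rendering of 0x12345678 */
        uint32_t presented = __builtin_bswap32(as_delivered);

        printf("0x%08x -> 0x%08x\n", as_delivered, presented);
        return 0;
}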
static int handle_ir_dualbuffer_packet(struct context *context,
struct descriptor *d,
struct descriptor *last)
@ -1775,7 +1807,6 @@ static int handle_ir_dualbuffer_packet(struct context *context,
__le32 *ir_header;
size_t header_length;
void *p, *end;
int i;
if (db->first_res_count != 0 && db->second_res_count != 0) {
if (ctx->excess_bytes <= le16_to_cpu(db->second_req_count)) {
@ -1788,25 +1819,14 @@ static int handle_ir_dualbuffer_packet(struct context *context,
header_length = le16_to_cpu(db->first_req_count) -
le16_to_cpu(db->first_res_count);
i = ctx->header_length;
p = db + 1;
end = p + header_length;
while (p < end && i + ctx->base.header_size <= PAGE_SIZE) {
/*
* The iso header is byteswapped to little endian by
* the controller, but the remaining header quadlets
* are big endian. We want to present all the headers
* as big endian, so we have to swap the first
* quadlet.
*/
*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
i += ctx->base.header_size;
while (p < end) {
copy_iso_headers(ctx, p);
ctx->excess_bytes +=
(le32_to_cpu(*(__le32 *)(p + 4)) >> 16) & 0xffff;
p += ctx->base.header_size + 4;
p += max(ctx->base.header_size, (size_t)8);
}
ctx->header_length = i;
ctx->excess_bytes -= le16_to_cpu(db->second_req_count) -
le16_to_cpu(db->second_res_count);
@ -1832,7 +1852,6 @@ static int handle_ir_packet_per_buffer(struct context *context,
struct descriptor *pd;
__le32 *ir_header;
void *p;
int i;
for (pd = d; pd <= last; pd++) {
if (pd->transfer_status)
@ -1842,21 +1861,8 @@ static int handle_ir_packet_per_buffer(struct context *context,
/* Descriptor(s) not done yet, stop iteration */
return 0;
i = ctx->header_length;
p = last + 1;
if (ctx->base.header_size > 0 &&
i + ctx->base.header_size <= PAGE_SIZE) {
/*
* The iso header is byteswapped to little endian by
* the controller, but the remaining header quadlets
* are big endian. We want to present all the headers
* as big endian, so we have to swap the first quadlet.
*/
*(u32 *) (ctx->header + i) = __swab32(*(u32 *) (p + 4));
memcpy(ctx->header + i + 4, p + 8, ctx->base.header_size - 4);
ctx->header_length += ctx->base.header_size;
}
p = last + 1;
copy_iso_headers(ctx, p);
if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
ir_header = (__le32 *) p;
@ -1888,21 +1894,24 @@ static int handle_it_packet(struct context *context,
return 1;
}
static struct fw_iso_context *
ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
int type, int channel, size_t header_size)
{
struct fw_ohci *ohci = fw_ohci(card);
struct iso_context *ctx, *list;
descriptor_callback_t callback;
u64 *channels, dont_care = ~0ULL;
u32 *mask, regs;
unsigned long flags;
int index, retval = -ENOMEM;
int index, ret = -ENOMEM;
if (type == FW_ISO_CONTEXT_TRANSMIT) {
channels = &dont_care;
mask = &ohci->it_context_mask;
list = ohci->it_context_list;
callback = handle_it_packet;
} else {
channels = &ohci->ir_context_channels;
mask = &ohci->ir_context_mask;
list = ohci->ir_context_list;
if (ohci->use_dualbuffer)
@ -1912,9 +1921,11 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
}
spin_lock_irqsave(&ohci->lock, flags);
index = ffs(*mask) - 1;
if (index >= 0)
index = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
if (index >= 0) {
*channels &= ~(1ULL << channel);
*mask &= ~(1 << index);
}
spin_unlock_irqrestore(&ohci->lock, flags);
if (index < 0)
@ -1932,8 +1943,8 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
if (ctx->header == NULL)
goto out;
retval = context_init(&ctx->context, ohci, regs, callback);
if (retval < 0)
ret = context_init(&ctx->context, ohci, regs, callback);
if (ret < 0)
goto out_with_header;
return &ctx->base;
@ -1945,7 +1956,7 @@ ohci_allocate_iso_context(struct fw_card *card, int type, size_t header_size)
*mask |= 1 << index;
spin_unlock_irqrestore(&ohci->lock, flags);
return ERR_PTR(retval);
return ERR_PTR(ret);
}
static int ohci_start_iso(struct fw_iso_context *base,
@ -2024,16 +2035,16 @@ static void ohci_free_iso_context(struct fw_iso_context *base)
} else {
index = ctx - ohci->ir_context_list;
ohci->ir_context_mask |= 1 << index;
ohci->ir_context_channels |= 1ULL << base->channel;
}
spin_unlock_irqrestore(&ohci->lock, flags);
}
static int
ohci_queue_iso_transmit(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
static int ohci_queue_iso_transmit(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
struct descriptor *d, *last, *pd;
@ -2128,11 +2139,10 @@ ohci_queue_iso_transmit(struct fw_iso_context *base,
return 0;
}
static int
ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
static int ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
struct db_descriptor *db = NULL;
@ -2151,11 +2161,11 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
z = 2;
/*
* The OHCI controller puts the status word in the header
* buffer too, so we need 4 extra bytes per packet.
* The OHCI controller puts the isochronous header and trailer in the
* buffer, so we need at least 8 bytes.
*/
packet_count = p->header_length / ctx->base.header_size;
header_size = packet_count * (ctx->base.header_size + 4);
header_size = packet_count * max(ctx->base.header_size, (size_t)8);
/* Get header size in number of descriptors. */
header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@ -2173,7 +2183,8 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
db = (struct db_descriptor *) d;
db->control = cpu_to_le16(DESCRIPTOR_STATUS |
DESCRIPTOR_BRANCH_ALWAYS);
db->first_size = cpu_to_le16(ctx->base.header_size + 4);
db->first_size =
cpu_to_le16(max(ctx->base.header_size, (size_t)8));
if (p->skip && rest == p->payload_length) {
db->control |= cpu_to_le16(DESCRIPTOR_WAIT);
db->first_req_count = db->first_size;
@ -2208,11 +2219,10 @@ ohci_queue_iso_receive_dualbuffer(struct fw_iso_context *base,
return 0;
}
static int
ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
static int ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
struct descriptor *d = NULL, *pd = NULL;
@ -2223,11 +2233,11 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
int page, offset, packet_count, header_size, payload_per_buffer;
/*
* The OHCI controller puts the status word in the
* buffer too, so we need 4 extra bytes per packet.
* The OHCI controller puts the isochronous header and trailer in the
* buffer, so we need at least 8 bytes.
*/
packet_count = p->header_length / ctx->base.header_size;
header_size = ctx->base.header_size + 4;
header_size = max(ctx->base.header_size, (size_t)8);
/* Get header size in number of descriptors. */
header_z = DIV_ROUND_UP(header_size, sizeof(*d));
@ -2286,29 +2296,27 @@ ohci_queue_iso_receive_packet_per_buffer(struct fw_iso_context *base,
return 0;
}
static int
ohci_queue_iso(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
static int ohci_queue_iso(struct fw_iso_context *base,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload)
{
struct iso_context *ctx = container_of(base, struct iso_context, base);
unsigned long flags;
int retval;
int ret;
spin_lock_irqsave(&ctx->context.ohci->lock, flags);
if (base->type == FW_ISO_CONTEXT_TRANSMIT)
retval = ohci_queue_iso_transmit(base, packet, buffer, payload);
ret = ohci_queue_iso_transmit(base, packet, buffer, payload);
else if (ctx->context.ohci->use_dualbuffer)
retval = ohci_queue_iso_receive_dualbuffer(base, packet,
buffer, payload);
ret = ohci_queue_iso_receive_dualbuffer(base, packet,
buffer, payload);
else
retval = ohci_queue_iso_receive_packet_per_buffer(base, packet,
buffer,
payload);
ret = ohci_queue_iso_receive_packet_per_buffer(base, packet,
buffer, payload);
spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);
return retval;
return ret;
}
static const struct fw_card_driver ohci_driver = {
@ -2357,8 +2365,8 @@ static void ohci_pmac_off(struct pci_dev *dev)
#define ohci_pmac_off(dev)
#endif /* CONFIG_PPC_PMAC */
static int __devinit
pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
static int __devinit pci_probe(struct pci_dev *dev,
const struct pci_device_id *ent)
{
struct fw_ohci *ohci;
u32 bus_options, max_receive, link_speed, version;
@ -2440,6 +2448,7 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
ohci->it_context_list = kzalloc(size, GFP_KERNEL);
reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
ohci->ir_context_channels = ~0ULL;
ohci->ir_context_mask = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
size = sizeof(struct iso_context) * hweight32(ohci->ir_context_mask);
@ -2467,11 +2476,12 @@ pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
reg_read(ohci, OHCI1394_GUIDLo);
err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
if (err < 0)
if (err)
goto fail_self_id;
fw_notify("Added fw-ohci device %s, OHCI version %x.%x\n",
dev_name(&dev->dev), version >> 16, version & 0xff);
return 0;
fail_self_id:

drivers/firewire/fw-sbp2.c

@ -392,20 +392,18 @@ static const struct {
}
};
static void
free_orb(struct kref *kref)
static void free_orb(struct kref *kref)
{
struct sbp2_orb *orb = container_of(kref, struct sbp2_orb, kref);
kfree(orb);
}
static void
sbp2_status_write(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source,
int generation, int speed,
unsigned long long offset,
void *payload, size_t length, void *callback_data)
static void sbp2_status_write(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source,
int generation, int speed,
unsigned long long offset,
void *payload, size_t length, void *callback_data)
{
struct sbp2_logical_unit *lu = callback_data;
struct sbp2_orb *orb;
@ -451,9 +449,8 @@ sbp2_status_write(struct fw_card *card, struct fw_request *request,
fw_send_response(card, request, RCODE_COMPLETE);
}
static void
complete_transaction(struct fw_card *card, int rcode,
void *payload, size_t length, void *data)
static void complete_transaction(struct fw_card *card, int rcode,
void *payload, size_t length, void *data)
{
struct sbp2_orb *orb = data;
unsigned long flags;
@ -482,9 +479,8 @@ complete_transaction(struct fw_card *card, int rcode,
kref_put(&orb->kref, free_orb);
}
static void
sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
int node_id, int generation, u64 offset)
static void sbp2_send_orb(struct sbp2_orb *orb, struct sbp2_logical_unit *lu,
int node_id, int generation, u64 offset)
{
struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
unsigned long flags;
@ -531,8 +527,8 @@ static int sbp2_cancel_orbs(struct sbp2_logical_unit *lu)
return retval;
}
static void
complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
static void complete_management_orb(struct sbp2_orb *base_orb,
struct sbp2_status *status)
{
struct sbp2_management_orb *orb =
container_of(base_orb, struct sbp2_management_orb, base);
@ -542,10 +538,9 @@ complete_management_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
complete(&orb->done);
}
static int
sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
int generation, int function, int lun_or_login_id,
void *response)
static int sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
int generation, int function,
int lun_or_login_id, void *response)
{
struct fw_device *device = fw_device(lu->tgt->unit->device.parent);
struct sbp2_management_orb *orb;
@ -652,9 +647,8 @@ static void sbp2_agent_reset(struct sbp2_logical_unit *lu)
&d, sizeof(d));
}
static void
complete_agent_reset_write_no_wait(struct fw_card *card, int rcode,
void *payload, size_t length, void *data)
static void complete_agent_reset_write_no_wait(struct fw_card *card,
int rcode, void *payload, size_t length, void *data)
{
kfree(data);
}
@ -1299,8 +1293,7 @@ static void sbp2_unmap_scatterlist(struct device *card_device,
sizeof(orb->page_table), DMA_TO_DEVICE);
}
static unsigned int
sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
static unsigned int sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
{
int sam_status;
@ -1337,8 +1330,8 @@ sbp2_status_to_sense_data(u8 *sbp2_status, u8 *sense_data)
}
}
static void
complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
static void complete_command_orb(struct sbp2_orb *base_orb,
struct sbp2_status *status)
{
struct sbp2_command_orb *orb =
container_of(base_orb, struct sbp2_command_orb, base);
@ -1384,9 +1377,8 @@ complete_command_orb(struct sbp2_orb *base_orb, struct sbp2_status *status)
orb->done(orb->cmd);
}
static int
sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
struct sbp2_logical_unit *lu)
static int sbp2_map_scatterlist(struct sbp2_command_orb *orb,
struct fw_device *device, struct sbp2_logical_unit *lu)
{
struct scatterlist *sg = scsi_sglist(orb->cmd);
int i, n;
@ -1584,9 +1576,8 @@ static int sbp2_scsi_abort(struct scsi_cmnd *cmd)
* This is the concatenation of target port identifier and logical unit
* identifier as per SAM-2...SAM-4 annex A.
*/
static ssize_t
sbp2_sysfs_ieee1394_id_show(struct device *dev, struct device_attribute *attr,
char *buf)
static ssize_t sbp2_sysfs_ieee1394_id_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct scsi_device *sdev = to_scsi_device(dev);
struct sbp2_logical_unit *lu;

drivers/firewire/fw-topology.c

@ -314,9 +314,8 @@ typedef void (*fw_node_callback_t)(struct fw_card * card,
struct fw_node * node,
struct fw_node * parent);
static void
for_each_fw_node(struct fw_card *card, struct fw_node *root,
fw_node_callback_t callback)
static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
fw_node_callback_t callback)
{
struct list_head list;
struct fw_node *node, *next, *child, *parent;
@ -349,9 +348,8 @@ for_each_fw_node(struct fw_card *card, struct fw_node *root,
fw_node_put(node);
}
static void
report_lost_node(struct fw_card *card,
struct fw_node *node, struct fw_node *parent)
static void report_lost_node(struct fw_card *card,
struct fw_node *node, struct fw_node *parent)
{
fw_node_event(card, node, FW_NODE_DESTROYED);
fw_node_put(node);
@ -360,9 +358,8 @@ report_lost_node(struct fw_card *card,
card->bm_retries = 0;
}
static void
report_found_node(struct fw_card *card,
struct fw_node *node, struct fw_node *parent)
static void report_found_node(struct fw_card *card,
struct fw_node *node, struct fw_node *parent)
{
int b_path = (node->phy_speed == SCODE_BETA);
@ -415,8 +412,7 @@ static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
* found, lost or updated. Update the nodes in the card topology tree
* as we go.
*/
static void
update_tree(struct fw_card *card, struct fw_node *root)
static void update_tree(struct fw_card *card, struct fw_node *root)
{
struct list_head list0, list1;
struct fw_node *node0, *node1, *next1;
@ -497,8 +493,8 @@ update_tree(struct fw_card *card, struct fw_node *root)
}
}
static void
update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
static void update_topology_map(struct fw_card *card,
u32 *self_ids, int self_id_count)
{
int node_count;
@ -510,10 +506,8 @@ update_topology_map(struct fw_card *card, u32 *self_ids, int self_id_count)
fw_compute_block_crc(card->topology_map);
}
void
fw_core_handle_bus_reset(struct fw_card *card,
int node_id, int generation,
int self_id_count, u32 * self_ids)
void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
int self_id_count, u32 *self_ids)
{
struct fw_node *local_node;
unsigned long flags;
@ -532,6 +526,7 @@ fw_core_handle_bus_reset(struct fw_card *card,
spin_lock_irqsave(&card->lock, flags);
card->broadcast_channel_allocated = false;
card->node_id = node_id;
/*
* Update node_id before generation to prevent anybody from using

drivers/firewire/fw-topology.h

@ -19,6 +19,11 @@
#ifndef __fw_topology_h
#define __fw_topology_h
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/atomic.h>
enum {
FW_NODE_CREATED,
FW_NODE_UPDATED,
@ -51,26 +56,22 @@ struct fw_node {
struct fw_node *ports[0];
};
static inline struct fw_node *
fw_node_get(struct fw_node *node)
static inline struct fw_node *fw_node_get(struct fw_node *node)
{
atomic_inc(&node->ref_count);
return node;
}
static inline void
fw_node_put(struct fw_node *node)
static inline void fw_node_put(struct fw_node *node)
{
if (atomic_dec_and_test(&node->ref_count))
kfree(node);
}
void
fw_destroy_nodes(struct fw_card *card);
int
fw_compute_block_crc(u32 *block);
struct fw_card;
void fw_destroy_nodes(struct fw_card *card);
int fw_compute_block_crc(u32 *block);
#endif /* __fw_topology_h */

drivers/firewire/fw-transaction.c

@ -64,10 +64,8 @@
#define PHY_CONFIG_ROOT_ID(node_id) ((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id) ((id) << 30)
static int
close_transaction(struct fw_transaction *transaction,
struct fw_card *card, int rcode,
u32 *payload, size_t length)
static int close_transaction(struct fw_transaction *transaction,
struct fw_card *card, int rcode)
{
struct fw_transaction *t;
unsigned long flags;
@ -83,7 +81,7 @@ close_transaction(struct fw_transaction *transaction,
spin_unlock_irqrestore(&card->lock, flags);
if (&t->link != &card->transaction_list) {
t->callback(card, rcode, payload, length, t->callback_data);
t->callback(card, rcode, NULL, 0, t->callback_data);
return 0;
}
@ -94,9 +92,8 @@ close_transaction(struct fw_transaction *transaction,
* Only valid for transactions that are potentially pending (i.e. have
* been sent).
*/
int
fw_cancel_transaction(struct fw_card *card,
struct fw_transaction *transaction)
int fw_cancel_transaction(struct fw_card *card,
struct fw_transaction *transaction)
{
/*
* Cancel the packet transmission if it's still queued. That
@ -112,20 +109,19 @@ fw_cancel_transaction(struct fw_card *card,
* if the transaction is still pending and remove it in that case.
*/
return close_transaction(transaction, card, RCODE_CANCELLED, NULL, 0);
return close_transaction(transaction, card, RCODE_CANCELLED);
}
EXPORT_SYMBOL(fw_cancel_transaction);
static void
transmit_complete_callback(struct fw_packet *packet,
struct fw_card *card, int status)
static void transmit_complete_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
struct fw_transaction *t =
container_of(packet, struct fw_transaction, packet);
switch (status) {
case ACK_COMPLETE:
close_transaction(t, card, RCODE_COMPLETE, NULL, 0);
close_transaction(t, card, RCODE_COMPLETE);
break;
case ACK_PENDING:
t->timestamp = packet->timestamp;
@ -133,31 +129,42 @@ transmit_complete_callback(struct fw_packet *packet,
case ACK_BUSY_X:
case ACK_BUSY_A:
case ACK_BUSY_B:
close_transaction(t, card, RCODE_BUSY, NULL, 0);
close_transaction(t, card, RCODE_BUSY);
break;
case ACK_DATA_ERROR:
close_transaction(t, card, RCODE_DATA_ERROR, NULL, 0);
close_transaction(t, card, RCODE_DATA_ERROR);
break;
case ACK_TYPE_ERROR:
close_transaction(t, card, RCODE_TYPE_ERROR, NULL, 0);
close_transaction(t, card, RCODE_TYPE_ERROR);
break;
default:
/*
* In this case the ack is really a juju specific
* rcode, so just forward that to the callback.
*/
close_transaction(t, card, status, NULL, 0);
close_transaction(t, card, status);
break;
}
}
static void
fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
int destination_id, int source_id, int generation, int speed,
unsigned long long offset, void *payload, size_t length)
{
int ext_tcode;
if (tcode == TCODE_STREAM_DATA) {
packet->header[0] =
HEADER_DATA_LENGTH(length) |
destination_id |
HEADER_TCODE(TCODE_STREAM_DATA);
packet->header_length = 4;
packet->payload = payload;
packet->payload_length = length;
goto common;
}
if (tcode > 0x10) {
ext_tcode = tcode & ~0x10;
tcode = TCODE_LOCK_REQUEST;
@ -204,7 +211,7 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
packet->payload_length = 0;
break;
}
common:
packet->speed = speed;
packet->generation = generation;
packet->ack = 0;
@ -246,13 +253,14 @@ fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
* @param callback function to be called when the transaction is completed
* @param callback_data pointer to arbitrary data, which will be
* passed to the callback
*
* In case of asynchronous stream packets i.e. TCODE_STREAM_DATA, the caller
* needs to synthesize @destination_id with fw_stream_packet_destination_id().
*/
void
fw_send_request(struct fw_card *card, struct fw_transaction *t,
int tcode, int destination_id, int generation, int speed,
unsigned long long offset,
void *payload, size_t length,
fw_transaction_callback_t callback, void *callback_data)
void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
int destination_id, int generation, int speed,
unsigned long long offset, void *payload, size_t length,
fw_transaction_callback_t callback, void *callback_data)
{
unsigned long flags;
int tlabel;
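As the comment above notes, stream packets need a synthesized destination ID. A hypothetical caller might look like the sketch below; the tag/channel/sy values, SCODE_100, and the callback body are illustrative, while fw_send_request() and fw_stream_packet_destination_id() are the functions shown in this diff:

static void stream_done(struct fw_card *card, int rcode,
			void *data, size_t length, void *callback_data)
{
	/* rcode carries the completion status, e.g. RCODE_COMPLETE */
}

static void send_stream_packet(struct fw_card *card, struct fw_transaction *t,
			       int generation, void *payload, size_t length)
{
	/* tag 1, channel 5, sy 0 -- made-up example values */
	int destination_id = fw_stream_packet_destination_id(1, 5, 0);

	fw_send_request(card, t, TCODE_STREAM_DATA, destination_id,
			generation, SCODE_100, 0 /* offset unused here */,
			payload, length, stream_done, NULL);
}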
@ -322,16 +330,16 @@ static void transaction_callback(struct fw_card *card, int rcode,
* Returns the RCODE.
*/
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
int generation, int speed, unsigned long long offset,
void *data, size_t length)
int generation, int speed, unsigned long long offset,
void *payload, size_t length)
{
struct transaction_callback_data d;
struct fw_transaction t;
init_completion(&d.done);
d.payload = data;
d.payload = payload;
fw_send_request(card, &t, tcode, destination_id, generation, speed,
offset, data, length, transaction_callback, &d);
offset, payload, length, transaction_callback, &d);
wait_for_completion(&d.done);
return d.rcode;
@ -399,9 +407,8 @@ void fw_flush_transactions(struct fw_card *card)
}
}
static struct fw_address_handler *
lookup_overlapping_address_handler(struct list_head *list,
unsigned long long offset, size_t length)
static struct fw_address_handler *lookup_overlapping_address_handler(
struct list_head *list, unsigned long long offset, size_t length)
{
struct fw_address_handler *handler;
@ -414,9 +421,8 @@ lookup_overlapping_address_handler(struct list_head *list,
return NULL;
}
static struct fw_address_handler *
lookup_enclosing_address_handler(struct list_head *list,
unsigned long long offset, size_t length)
static struct fw_address_handler *lookup_enclosing_address_handler(
struct list_head *list, unsigned long long offset, size_t length)
{
struct fw_address_handler *handler;
@ -449,36 +455,44 @@ const struct fw_address_region fw_unit_space_region =
#endif /* 0 */
/**
* Allocate a range of addresses in the node space of the OHCI
* controller. When a request is received that falls within the
* specified address range, the specified callback is invoked. The
* parameters passed to the callback give the details of the
* particular request.
* fw_core_add_address_handler - register for incoming requests
* @handler: callback
* @region: region in the IEEE 1212 node space address range
*
* region->start, ->end, and handler->length have to be quadlet-aligned.
*
* When a request is received that falls within the specified address range,
* the specified callback is invoked. The parameters passed to the callback
* give the details of the particular request.
*
* Return value: 0 on success, non-zero otherwise.
* The start offset of the handler's address region is determined by
* fw_core_add_address_handler() and is returned in handler->offset.
* The offset is quadlet-aligned.
*/
int
fw_core_add_address_handler(struct fw_address_handler *handler,
const struct fw_address_region *region)
int fw_core_add_address_handler(struct fw_address_handler *handler,
const struct fw_address_region *region)
{
struct fw_address_handler *other;
unsigned long flags;
int ret = -EBUSY;
if (region->start & 0xffff000000000003ULL ||
region->end & 0xffff000000000003ULL ||
region->start >= region->end ||
handler->length & 3 ||
handler->length == 0)
return -EINVAL;
spin_lock_irqsave(&address_handler_lock, flags);
handler->offset = roundup(region->start, 4);
handler->offset = region->start;
while (handler->offset + handler->length <= region->end) {
other =
lookup_overlapping_address_handler(&address_handler_list,
handler->offset,
handler->length);
if (other != NULL) {
handler->offset =
roundup(other->offset + other->length, 4);
handler->offset += other->length;
} else {
list_add_tail(&handler->link, &address_handler_list);
ret = 0;
@ -493,12 +507,7 @@ fw_core_add_address_handler(struct fw_address_handler *handler,
EXPORT_SYMBOL(fw_core_add_address_handler);
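A hedged registration sketch for this API; the handler, the callback body, and the region values are invented for illustration, and the .address_callback field name is assumed from the fw_address_handler context elsewhere in this diff:

static void example_request_handler(struct fw_card *card,
				    struct fw_request *request,
				    int tcode, int destination, int source,
				    int generation, int speed,
				    unsigned long long offset,
				    void *payload, size_t length,
				    void *callback_data)
{
	/* inspect tcode/offset/payload, then complete the request */
	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler example_handler = {
	.length           = 0x100,	/* quadlet-aligned, non-zero */
	.address_callback = example_request_handler,
};

static const struct fw_address_region example_region =
	{ .start = 0xfffff0001000ULL, .end = 0xfffff0002000ULL };

/* if (fw_core_add_address_handler(&example_handler, &example_region) == 0)
 *	the allocated start offset is returned in example_handler.offset; */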
/**
* Deallocate a range of addresses allocated with fw_allocate. This
* will call the associated callback one last time with the special
* tcode TCODE_DEALLOCATE, to let the client destroy the registered
* callback data. For convenience, the callback parameters offset and
* length are set to the start and the length respectively for the
* deallocated region, payload is set to NULL.
* fw_core_remove_address_handler - unregister an address handler
*/
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
@ -518,9 +527,8 @@ struct fw_request {
u32 data[0];
};
static void
free_response_callback(struct fw_packet *packet,
struct fw_card *card, int status)
static void free_response_callback(struct fw_packet *packet,
struct fw_card *card, int status)
{
struct fw_request *request;
@ -528,9 +536,8 @@ free_response_callback(struct fw_packet *packet,
kfree(request);
}
void
fw_fill_response(struct fw_packet *response, u32 *request_header,
int rcode, void *payload, size_t length)
void fw_fill_response(struct fw_packet *response, u32 *request_header,
int rcode, void *payload, size_t length)
{
int tcode, tlabel, extended_tcode, source, destination;
@ -588,8 +595,7 @@ fw_fill_response(struct fw_packet *response, u32 *request_header,
}
EXPORT_SYMBOL(fw_fill_response);
static struct fw_request *
allocate_request(struct fw_packet *p)
static struct fw_request *allocate_request(struct fw_packet *p)
{
struct fw_request *request;
u32 *data, length;
@ -649,8 +655,8 @@ allocate_request(struct fw_packet *p)
return request;
}
void
fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
void fw_send_response(struct fw_card *card,
struct fw_request *request, int rcode)
{
/* unified transaction or broadcast transaction: don't respond */
if (request->ack != ACK_PENDING ||
@ -670,8 +676,7 @@ fw_send_response(struct fw_card *card, struct fw_request *request, int rcode)
}
EXPORT_SYMBOL(fw_send_response);
void
fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
struct fw_address_handler *handler;
struct fw_request *request;
@ -719,8 +724,7 @@ fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
}
EXPORT_SYMBOL(fw_core_handle_request);
void
fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
struct fw_transaction *t;
unsigned long flags;
@ -793,12 +797,10 @@ static const struct fw_address_region topology_map_region =
{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
.end = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };
static void
handle_topology_map(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source,
int generation, int speed,
unsigned long long offset,
void *payload, size_t length, void *callback_data)
static void handle_topology_map(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
int speed, unsigned long long offset,
void *payload, size_t length, void *callback_data)
{
int i, start, end;
__be32 *map;
@ -832,12 +834,10 @@ static const struct fw_address_region registers_region =
{ .start = CSR_REGISTER_BASE,
.end = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };
static void
handle_registers(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source,
int generation, int speed,
unsigned long long offset,
void *payload, size_t length, void *callback_data)
static void handle_registers(struct fw_card *card, struct fw_request *request,
int tcode, int destination, int source, int generation,
int speed, unsigned long long offset,
void *payload, size_t length, void *callback_data)
{
int reg = offset & ~CSR_REGISTER_BASE;
unsigned long long bus_time;
@ -939,11 +939,11 @@ static struct fw_descriptor model_id_descriptor = {
static int __init fw_core_init(void)
{
int retval;
int ret;
retval = bus_register(&fw_bus_type);
if (retval < 0)
return retval;
ret = bus_register(&fw_bus_type);
if (ret < 0)
return ret;
fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
if (fw_cdev_major < 0) {
@ -951,19 +951,10 @@ static int __init fw_core_init(void)
return fw_cdev_major;
}
retval = fw_core_add_address_handler(&topology_map,
&topology_map_region);
BUG_ON(retval < 0);
retval = fw_core_add_address_handler(&registers,
&registers_region);
BUG_ON(retval < 0);
/* Add the vendor textual descriptor. */
retval = fw_core_add_descriptor(&vendor_id_descriptor);
BUG_ON(retval < 0);
retval = fw_core_add_descriptor(&model_id_descriptor);
BUG_ON(retval < 0);
fw_core_add_address_handler(&topology_map, &topology_map_region);
fw_core_add_address_handler(&registers, &registers_region);
fw_core_add_descriptor(&vendor_id_descriptor);
fw_core_add_descriptor(&model_id_descriptor);
return 0;
}

drivers/firewire/fw-transaction.h

@ -82,14 +82,14 @@
#define CSR_SPEED_MAP 0x2000
#define CSR_SPEED_MAP_END 0x3000
#define BANDWIDTH_AVAILABLE_INITIAL 4915
#define BROADCAST_CHANNEL_INITIAL (1 << 31 | 31)
#define BROADCAST_CHANNEL_VALID (1 << 30)
#define fw_notify(s, args...) printk(KERN_NOTICE KBUILD_MODNAME ": " s, ## args)
#define fw_error(s, args...) printk(KERN_ERR KBUILD_MODNAME ": " s, ## args)
static inline void
fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
static inline void fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
{
u32 *dst = _dst;
__be32 *src = _src;
@ -99,8 +99,7 @@ fw_memcpy_from_be32(void *_dst, void *_src, size_t size)
dst[i] = be32_to_cpu(src[i]);
}
static inline void
fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
static inline void fw_memcpy_to_be32(void *_dst, void *_src, size_t size)
{
fw_memcpy_from_be32(_dst, _src, size);
}
@ -125,8 +124,7 @@ typedef void (*fw_packet_callback_t)(struct fw_packet *packet,
struct fw_card *card, int status);
typedef void (*fw_transaction_callback_t)(struct fw_card *card, int rcode,
void *data,
size_t length,
void *data, size_t length,
void *callback_data);
/*
@ -141,12 +139,6 @@ typedef void (*fw_address_callback_t)(struct fw_card *card,
void *data, size_t length,
void *callback_data);
typedef void (*fw_bus_reset_callback_t)(struct fw_card *handle,
int node_id, int generation,
u32 *self_ids,
int self_id_count,
void *callback_data);
struct fw_packet {
int speed;
int generation;
@ -187,12 +179,6 @@ struct fw_transaction {
void *callback_data;
};
static inline struct fw_packet *
fw_packet(struct list_head *l)
{
return list_entry(l, struct fw_packet, link);
}
struct fw_address_handler {
u64 offset;
size_t length;
@ -201,7 +187,6 @@ struct fw_address_handler {
struct list_head link;
};
struct fw_address_region {
u64 start;
u64 end;
@ -255,6 +240,7 @@ struct fw_card {
int bm_retries;
int bm_generation;
bool broadcast_channel_allocated;
u32 broadcast_channel;
u32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4];
};
@ -315,10 +301,8 @@ struct fw_iso_packet {
struct fw_iso_context;
typedef void (*fw_iso_callback_t)(struct fw_iso_context *context,
u32 cycle,
size_t header_length,
void *header,
void *data);
u32 cycle, size_t header_length,
void *header, void *data);
/*
* An iso buffer is just a set of pages mapped for DMA in the
@ -344,36 +328,25 @@ struct fw_iso_context {
void *callback_data;
};
int
fw_iso_buffer_init(struct fw_iso_buffer *buffer,
struct fw_card *card,
int page_count,
enum dma_data_direction direction);
int
fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
void
fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
int page_count, enum dma_data_direction direction);
int fw_iso_buffer_map(struct fw_iso_buffer *buffer, struct vm_area_struct *vma);
void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer, struct fw_card *card);
struct fw_iso_context *
fw_iso_context_create(struct fw_card *card, int type,
int channel, int speed, size_t header_size,
fw_iso_callback_t callback, void *callback_data);
struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
int type, int channel, int speed, size_t header_size,
fw_iso_callback_t callback, void *callback_data);
int fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload);
int fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags);
int fw_iso_context_stop(struct fw_iso_context *ctx);
void fw_iso_context_destroy(struct fw_iso_context *ctx);
void
fw_iso_context_destroy(struct fw_iso_context *ctx);
int
fw_iso_context_queue(struct fw_iso_context *ctx,
struct fw_iso_packet *packet,
struct fw_iso_buffer *buffer,
unsigned long payload);
int
fw_iso_context_start(struct fw_iso_context *ctx,
int cycle, int sync, int tags);
int
fw_iso_context_stop(struct fw_iso_context *ctx);
void fw_iso_resource_manage(struct fw_card *card, int generation,
u64 channels_mask, int *channel, int *bandwidth, bool allocate);
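Read together, these declarations outline the isochronous lifecycle: create a context, queue buffers, start, stop, destroy. Below is a receive-side sketch under stated assumptions: the channel, speed, and header size are arbitrary, and FW_ISO_CONTEXT_RECEIVE and SCODE_400 are assumed to be the core's existing constants.

static void example_iso_callback(struct fw_iso_context *context, u32 cycle,
				 size_t header_length, void *header, void *data)
{
	/* header holds the per-packet iso headers copied by the driver */
}

static int example_iso_receive_start(struct fw_card *card)
{
	struct fw_iso_context *ctx;

	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_RECEIVE,
				    5 /* channel */, SCODE_400 /* speed */,
				    8 /* header_size */,
				    example_iso_callback, NULL);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	/* queue reception buffers with fw_iso_context_queue(), then: */
	return fw_iso_context_start(ctx, -1 /* cycle: ASAP */,
				    0 /* sync */, 0 /* tags */);
}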
struct fw_card_driver {
/*
@ -415,7 +388,7 @@ struct fw_card_driver {
struct fw_iso_context *
(*allocate_iso_context)(struct fw_card *card,
int type, size_t header_size);
int type, int channel, size_t header_size);
void (*free_iso_context)(struct fw_iso_context *ctx);
int (*start_iso)(struct fw_iso_context *ctx,
@ -429,54 +402,45 @@ struct fw_card_driver {
int (*stop_iso)(struct fw_iso_context *ctx);
};
int
fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
int fw_core_initiate_bus_reset(struct fw_card *card, int short_reset);
void
fw_send_request(struct fw_card *card, struct fw_transaction *t,
void fw_send_request(struct fw_card *card, struct fw_transaction *t,
int tcode, int destination_id, int generation, int speed,
unsigned long long offset, void *data, size_t length,
unsigned long long offset, void *payload, size_t length,
fw_transaction_callback_t callback, void *callback_data);
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
int generation, int speed, unsigned long long offset,
void *data, size_t length);
int fw_cancel_transaction(struct fw_card *card,
struct fw_transaction *transaction);
void fw_flush_transactions(struct fw_card *card);
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
int generation, int speed, unsigned long long offset,
void *payload, size_t length);
void fw_send_phy_config(struct fw_card *card,
int node_id, int generation, int gap_count);
static inline int fw_stream_packet_destination_id(int tag, int channel, int sy)
{
return tag << 14 | channel << 8 | sy;
}
/*
* Called by the topology code to inform the device code of node
* activity; found, lost, or updated nodes.
*/
void
fw_node_event(struct fw_card *card, struct fw_node *node, int event);
void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
/* API used by card level drivers */
void
fw_card_initialize(struct fw_card *card, const struct fw_card_driver *driver,
struct device *device);
int
fw_card_add(struct fw_card *card,
u32 max_receive, u32 link_speed, u64 guid);
void fw_card_initialize(struct fw_card *card,
const struct fw_card_driver *driver, struct device *device);
int fw_card_add(struct fw_card *card,
u32 max_receive, u32 link_speed, u64 guid);
void fw_core_remove_card(struct fw_card *card);
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
int generation, int self_id_count, u32 *self_ids);
void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
void
fw_core_remove_card(struct fw_card *card);
void
fw_core_handle_bus_reset(struct fw_card *card,
int node_id, int generation,
int self_id_count, u32 *self_ids);
void
fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void
fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
extern int fw_irm_set_broadcast_channel_register(struct device *dev,
void *data);
#endif /* __fw_transaction_h */

drivers/gpu/drm/Makefile

@ -10,7 +10,8 @@ drm-y := drm_auth.o drm_bufs.o drm_cache.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o \
drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o
drm_crtc.o drm_crtc_helper.o drm_modes.o drm_edid.o \
drm_info.o drm_debugfs.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o

drivers/gpu/drm/drm_debugfs.c (new file, 235 lines)

@ -0,0 +1,235 @@
/**
* \file drm_debugfs.c
* debugfs support for DRM
*
* \author Ben Gamari <bgamari@gmail.com>
*/
/*
* Created: Sun Dec 21 13:08:50 2008 by bgamari@gmail.com
*
* Copyright 2008 Ben Gamari <bgamari@gmail.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "drmP.h"
#if defined(CONFIG_DEBUG_FS)
/***************************************************
* Initialization, etc.
**************************************************/
static struct drm_info_list drm_debugfs_list[] = {
{"name", drm_name_info, 0},
{"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
{"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
{"gem_objects", drm_gem_object_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info, 0},
#endif
};
#define DRM_DEBUGFS_ENTRIES ARRAY_SIZE(drm_debugfs_list)
static int drm_debugfs_open(struct inode *inode, struct file *file)
{
struct drm_info_node *node = inode->i_private;
return single_open(file, node->info_ent->show, node);
}
static const struct file_operations drm_debugfs_fops = {
.owner = THIS_MODULE,
.open = drm_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/**
* Initialize a given set of debugfs files for a device
*
* \param files The array of files to create
* \param count The number of files given
* \param root DRI debugfs dir entry.
* \param minor device minor number
* \return Zero on success, non-zero on failure
*
* Create a given set of debugfs files represented by an array of
* drm_info_list entries in the given root directory.
*/
int drm_debugfs_create_files(struct drm_info_list *files, int count,
struct dentry *root, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct dentry *ent;
struct drm_info_node *tmp;
char name[64];
int i, ret;
for (i = 0; i < count; i++) {
u32 features = files[i].driver_features;
if (features != 0 &&
(dev->driver->driver_features & features) != features)
continue;
tmp = drm_alloc(sizeof(struct drm_info_node),
_DRM_DRIVER);
ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
root, tmp, &drm_debugfs_fops);
if (!ent) {
DRM_ERROR("Cannot create /debugfs/dri/%s/%s\n",
name, files[i].name);
drm_free(tmp, sizeof(struct drm_info_node),
_DRM_DRIVER);
ret = -1;
goto fail;
}
tmp->minor = minor;
tmp->dent = ent;
tmp->info_ent = &files[i];
list_add(&(tmp->list), &(minor->debugfs_nodes.list));
}
return 0;
fail:
drm_debugfs_remove_files(files, count, minor);
return ret;
}
EXPORT_SYMBOL(drm_debugfs_create_files);
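A hypothetical driver-side use of this helper from a debugfs_init() hook; the mydrv_* names are invented, while the drm_info_list layout and call signature are the ones shown above:

static int mydrv_foo_info(struct seq_file *m, void *data)
{
	seq_printf(m, "foo state goes here\n");
	return 0;
}

static struct drm_info_list mydrv_debugfs_list[] = {
	{"foo", mydrv_foo_info, 0},
};

static int mydrv_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(mydrv_debugfs_list,
					ARRAY_SIZE(mydrv_debugfs_list),
					minor->debugfs_root, minor);
}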
/**
* Initialize the DRI debugfs filesystem for a device
*
* \param dev DRM device
* \param minor device minor number
* \param root DRI debugfs dir entry.
*
* Create the DRI debugfs root entry "/debugfs/dri", the device debugfs root entry
* "/debugfs/dri/%minor%/", and each entry in debugfs_list as
* "/debugfs/dri/%minor%/%name%".
*/
int drm_debugfs_init(struct drm_minor *minor, int minor_id,
struct dentry *root)
{
struct drm_device *dev = minor->dev;
char name[64];
int ret;
INIT_LIST_HEAD(&minor->debugfs_nodes.list);
sprintf(name, "%d", minor_id);
minor->debugfs_root = debugfs_create_dir(name, root);
if (!minor->debugfs_root) {
DRM_ERROR("Cannot create /debugfs/dri/%s\n", name);
return -1;
}
ret = drm_debugfs_create_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
if (ret) {
debugfs_remove(minor->debugfs_root);
minor->debugfs_root = NULL;
DRM_ERROR("Failed to create core drm debugfs files\n");
return ret;
}
if (dev->driver->debugfs_init) {
ret = dev->driver->debugfs_init(minor);
if (ret) {
DRM_ERROR("DRM: Driver failed to initialize "
"/debugfs/dri.\n");
return ret;
}
}
return 0;
}
/**
* Remove a list of debugfs files
*
* \param files The list of files
* \param count The number of files
* \param minor The minor of which we should remove the files
* \return always zero.
*
* Remove the given debugfs entries created by drm_debugfs_create_files().
*/
int drm_debugfs_remove_files(struct drm_info_list *files, int count,
struct drm_minor *minor)
{
struct list_head *pos, *q;
struct drm_info_node *tmp;
int i;
for (i = 0; i < count; i++) {
list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
tmp = list_entry(pos, struct drm_info_node, list);
if (tmp->info_ent == &files[i]) {
debugfs_remove(tmp->dent);
list_del(pos);
drm_free(tmp, sizeof(struct drm_info_node),
_DRM_DRIVER);
}
}
}
return 0;
}
EXPORT_SYMBOL(drm_debugfs_remove_files);
/**
* Cleanup the debugfs filesystem resources.
*
* \param minor device minor number.
* \return always zero.
*
* Remove all debugfs entries created by debugfs_init().
*/
int drm_debugfs_cleanup(struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
if (!minor->debugfs_root)
return 0;
if (dev->driver->debugfs_cleanup)
dev->driver->debugfs_cleanup(minor);
drm_debugfs_remove_files(drm_debugfs_list, DRM_DEBUGFS_ENTRIES, minor);
debugfs_remove(minor->debugfs_root);
minor->debugfs_root = NULL;
return 0;
}
#endif /* CONFIG_DEBUG_FS */

drivers/gpu/drm/drm_drv.c

@ -46,9 +46,11 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/debugfs.h>
#include "drmP.h"
#include "drm_core.h"
static int drm_version(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@ -178,7 +180,7 @@ int drm_lastclose(struct drm_device * dev)
/* Clear AGP information */
if (drm_core_has_AGP(dev) && dev->agp &&
!drm_core_check_feature(dev, DRIVER_MODESET)) {
!drm_core_check_feature(dev, DRIVER_MODESET)) {
struct drm_agp_mem *entry, *tempe;
/* Remove AGP resources, but leave dev->agp
@ -382,6 +384,13 @@ static int __init drm_core_init(void)
goto err_p3;
}
drm_debugfs_root = debugfs_create_dir("dri", NULL);
if (!drm_debugfs_root) {
DRM_ERROR("Cannot create /debugfs/dri\n");
ret = -1;
goto err_p3;
}
drm_mem_init();
DRM_INFO("Initialized %s %d.%d.%d %s\n",
@ -400,6 +409,7 @@ err_p1:
static void __exit drm_core_exit(void)
{
remove_proc_entry("dri", NULL);
debugfs_remove(drm_debugfs_root);
drm_sysfs_destroy();
unregister_chrdev(DRM_MAJOR, "drm");

drivers/gpu/drm/drm_info.c (new file, 328 lines)

@ -0,0 +1,328 @@
/**
* \file drm_info.c
* DRM info file implementations
*
* \author Ben Gamari <bgamari@gmail.com>
*/
/*
* Created: Sun Dec 21 13:09:50 2008 by bgamari@gmail.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* Copyright 2008 Ben Gamari <bgamari@gmail.com>
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/seq_file.h>
#include "drmP.h"
/**
* Called when "/proc/dri/.../name" is read.
*
* Prints the device name together with the bus id if available.
*/
int drm_name_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_minor *minor = node->minor;
struct drm_device *dev = minor->dev;
struct drm_master *master = minor->master;
if (!master)
return 0;
if (master->unique) {
seq_printf(m, "%s %s %s\n",
dev->driver->pci_driver.name,
pci_name(dev->pdev), master->unique);
} else {
seq_printf(m, "%s %s\n", dev->driver->pci_driver.name,
pci_name(dev->pdev));
}
return 0;
}
/**
* Called when "/proc/dri/.../vm" is read.
*
* Prints information about all mappings in drm_device::maplist.
*/
int drm_vm_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_map *map;
struct drm_map_list *r_list;
/* Hardcoded from _DRM_FRAME_BUFFER,
_DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
_DRM_SCATTER_GATHER and _DRM_CONSISTENT */
const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
const char *type;
int i;
mutex_lock(&dev->struct_mutex);
seq_printf(m, "slot offset size type flags address mtrr\n\n");
i = 0;
list_for_each_entry(r_list, &dev->maplist, head) {
map = r_list->map;
if (!map)
continue;
if (map->type < 0 || map->type > 5)
type = "??";
else
type = types[map->type];
seq_printf(m, "%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
i,
map->offset,
map->size, type, map->flags,
(unsigned long) r_list->user_token);
if (map->mtrr < 0)
seq_printf(m, "none\n");
else
seq_printf(m, "%4d\n", map->mtrr);
i++;
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
/**
* Called when "/proc/dri/.../queues" is read.
*/
int drm_queues_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
int i;
struct drm_queue *q;
mutex_lock(&dev->struct_mutex);
seq_printf(m, " ctx/flags use fin"
" blk/rw/rwf wait flushed queued"
" locks\n\n");
for (i = 0; i < dev->queue_count; i++) {
q = dev->queuelist[i];
atomic_inc(&q->use_count);
seq_printf(m, "%5d/0x%03x %5d %5d"
" %5d/%c%c/%c%c%c %5Zd\n",
i,
q->flags,
atomic_read(&q->use_count),
atomic_read(&q->finalization),
atomic_read(&q->block_count),
atomic_read(&q->block_read) ? 'r' : '-',
atomic_read(&q->block_write) ? 'w' : '-',
waitqueue_active(&q->read_queue) ? 'r' : '-',
waitqueue_active(&q->write_queue) ? 'w' : '-',
waitqueue_active(&q->flush_queue) ? 'f' : '-',
DRM_BUFCOUNT(&q->waitlist));
atomic_dec(&q->use_count);
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
/**
* Called when "/proc/dri/.../bufs" is read.
*/
int drm_bufs_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_device_dma *dma;
int i, seg_pages;
mutex_lock(&dev->struct_mutex);
dma = dev->dma;
if (!dma) {
mutex_unlock(&dev->struct_mutex);
return 0;
}
seq_printf(m, " o size count free segs pages kB\n\n");
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].buf_count) {
seg_pages = dma->bufs[i].seg_count * (1 << dma->bufs[i].page_order);
seq_printf(m, "%2d %8d %5d %5d %5d %5d %5ld\n",
i,
dma->bufs[i].buf_size,
dma->bufs[i].buf_count,
atomic_read(&dma->bufs[i].freelist.count),
dma->bufs[i].seg_count,
seg_pages,
seg_pages * PAGE_SIZE / 1024);
}
}
seq_printf(m, "\n");
for (i = 0; i < dma->buf_count; i++) {
if (i && !(i % 32))
seq_printf(m, "\n");
seq_printf(m, " %d", dma->buflist[i]->list);
}
seq_printf(m, "\n");
mutex_unlock(&dev->struct_mutex);
return 0;
}
/**
* Called when "/proc/dri/.../vblank" is read.
*/
int drm_vblank_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
int crtc;
mutex_lock(&dev->struct_mutex);
for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
seq_printf(m, "CRTC %d enable: %d\n",
crtc, atomic_read(&dev->vblank_refcount[crtc]));
seq_printf(m, "CRTC %d counter: %d\n",
crtc, drm_vblank_count(dev, crtc));
seq_printf(m, "CRTC %d last wait: %d\n",
crtc, dev->last_vblank_wait[crtc]);
seq_printf(m, "CRTC %d in modeset: %d\n",
crtc, dev->vblank_inmodeset[crtc]);
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
/**
* Called when "/proc/dri/.../clients" is read.
*
*/
int drm_clients_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_file *priv;
mutex_lock(&dev->struct_mutex);
seq_printf(m, "a dev pid uid magic ioctls\n\n");
list_for_each_entry(priv, &dev->filelist, lhead) {
seq_printf(m, "%c %3d %5d %5d %10u %10lu\n",
priv->authenticated ? 'y' : 'n',
priv->minor->index,
priv->pid,
priv->uid, priv->magic, priv->ioctl_count);
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
int drm_gem_one_name_info(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
struct seq_file *m = data;
seq_printf(m, "name %d size %zd\n", obj->name, obj->size);
seq_printf(m, "%6d %8zd %7d %8d\n",
obj->name, obj->size,
atomic_read(&obj->handlecount.refcount),
atomic_read(&obj->refcount.refcount));
return 0;
}
int drm_gem_name_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
seq_printf(m, " name size handles refcount\n");
idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, m);
return 0;
}
int drm_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
seq_printf(m, "%d gtt total\n", dev->gtt_total);
return 0;
}
#if DRM_DEBUG_CODE
int drm_vma_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_vma_entry *pt;
struct vm_area_struct *vma;
#if defined(__i386__)
unsigned int pgprot;
#endif
mutex_lock(&dev->struct_mutex);
seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
atomic_read(&dev->vma_count),
high_memory, (u64)virt_to_phys(high_memory));
list_for_each_entry(pt, &dev->vmalist, head) {
vma = pt->vma;
if (!vma)
continue;
seq_printf(m,
"\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
pt->pid, vma->vm_start, vma->vm_end,
vma->vm_flags & VM_READ ? 'r' : '-',
vma->vm_flags & VM_WRITE ? 'w' : '-',
vma->vm_flags & VM_EXEC ? 'x' : '-',
vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
vma->vm_flags & VM_LOCKED ? 'l' : '-',
vma->vm_flags & VM_IO ? 'i' : '-',
vma->vm_pgoff);
#if defined(__i386__)
pgprot = pgprot_val(vma->vm_page_prot);
seq_printf(m, " %c%c%c%c%c%c%c%c%c",
pgprot & _PAGE_PRESENT ? 'p' : '-',
pgprot & _PAGE_RW ? 'w' : 'r',
pgprot & _PAGE_USER ? 'u' : 's',
pgprot & _PAGE_PWT ? 't' : 'b',
pgprot & _PAGE_PCD ? 'u' : 'c',
pgprot & _PAGE_ACCESSED ? 'a' : '-',
pgprot & _PAGE_DIRTY ? 'd' : '-',
pgprot & _PAGE_PSE ? 'm' : 'k',
pgprot & _PAGE_GLOBAL ? 'g' : 'l');
#endif
seq_printf(m, "\n");
}
mutex_unlock(&dev->struct_mutex);
return 0;
}
#endif

drivers/gpu/drm/drm_proc.c

@ -37,58 +37,105 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/seq_file.h>
#include "drmP.h"
static int drm_name_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_vm_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_clients_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_queues_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_bufs_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_vblank_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_gem_name_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
static int drm_gem_object_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
#if DRM_DEBUG_CODE
static int drm_vma_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data);
#endif
/***************************************************
* Initialization, etc.
**************************************************/
/**
* Proc file list.
*/
static struct drm_proc_list {
const char *name; /**< file name */
int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/
u32 driver_features; /**< Required driver features for this entry */
} drm_proc_list[] = {
static struct drm_info_list drm_proc_list[] = {
{"name", drm_name_info, 0},
{"mem", drm_mem_info, 0},
{"vm", drm_vm_info, 0},
{"clients", drm_clients_info, 0},
{"queues", drm_queues_info, 0},
{"bufs", drm_bufs_info, 0},
{"vblank", drm_vblank_info, 0},
{"gem_names", drm_gem_name_info, DRIVER_GEM},
{"gem_objects", drm_gem_object_info, DRIVER_GEM},
#if DRM_DEBUG_CODE
{"vma", drm_vma_info},
{"vma", drm_vma_info, 0},
#endif
};
#define DRM_PROC_ENTRIES ARRAY_SIZE(drm_proc_list)
static int drm_proc_open(struct inode *inode, struct file *file)
{
struct drm_info_node* node = PDE(inode)->data;
return single_open(file, node->info_ent->show, node);
}
static const struct file_operations drm_proc_fops = {
.owner = THIS_MODULE,
.open = drm_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
/**
* Initialize the DRI proc filesystem for a device.
* Initialize a given set of proc files for a device
*
* \param dev DRM device.
* \param minor device minor number.
* \param files The array of files to create
* \param count The number of files given
* \param root DRI proc dir entry.
* \param minor device minor number
* \return Zero on success, non-zero on failure
*
* Create a given set of proc files represented by an array of
* drm_info_list entries in the given root directory.
*/
int drm_proc_create_files(struct drm_info_list *files, int count,
struct proc_dir_entry *root, struct drm_minor *minor)
{
struct drm_device *dev = minor->dev;
struct proc_dir_entry *ent;
struct drm_info_node *tmp;
char name[64];
int i, ret;
for (i = 0; i < count; i++) {
u32 features = files[i].driver_features;
if (features != 0 &&
(dev->driver->driver_features & features) != features)
continue;
tmp = drm_alloc(sizeof(struct drm_info_node), _DRM_DRIVER);
ent = create_proc_entry(files[i].name, S_IFREG | S_IRUGO, root);
if (!ent) {
DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
name, files[i].name);
drm_free(tmp, sizeof(struct drm_info_node),
_DRM_DRIVER);
ret = -1;
goto fail;
}
ent->proc_fops = &drm_proc_fops;
ent->data = tmp;
tmp->minor = minor;
tmp->info_ent = &files[i];
list_add(&(tmp->list), &(minor->proc_nodes.list));
}
return 0;
fail:
for (i = 0; i < count; i++)
remove_proc_entry(drm_proc_list[i].name, minor->proc_root);
return ret;
}
/**
* Initialize the DRI proc filesystem for a device
*
* \param dev DRM device
* \param minor device minor number
* \param root DRI proc dir entry.
* \param dev_root resulting DRI device proc dir entry.
* \return Zero on success, non-zero on failure.
@ -101,34 +148,24 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
struct proc_dir_entry *root)
{
struct drm_device *dev = minor->dev;
struct proc_dir_entry *ent;
int i, j, ret;
char name[64];
int ret;
INIT_LIST_HEAD(&minor->proc_nodes.list);
sprintf(name, "%d", minor_id);
minor->dev_root = proc_mkdir(name, root);
if (!minor->dev_root) {
minor->proc_root = proc_mkdir(name, root);
if (!minor->proc_root) {
DRM_ERROR("Cannot create /proc/dri/%s\n", name);
return -1;
}
for (i = 0; i < DRM_PROC_ENTRIES; i++) {
u32 features = drm_proc_list[i].driver_features;
if (features != 0 &&
(dev->driver->driver_features & features) != features)
continue;
ent = create_proc_entry(drm_proc_list[i].name,
S_IFREG | S_IRUGO, minor->dev_root);
if (!ent) {
DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
name, drm_proc_list[i].name);
ret = -1;
goto fail;
}
ent->read_proc = drm_proc_list[i].f;
ent->data = minor;
ret = drm_proc_create_files(drm_proc_list, DRM_PROC_ENTRIES,
minor->proc_root, minor);
if (ret) {
remove_proc_entry(name, root);
minor->proc_root = NULL;
DRM_ERROR("Failed to create core drm proc files\n");
return ret;
}
if (dev->driver->proc_init) {
@ -136,19 +173,32 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
if (ret) {
DRM_ERROR("DRM: Driver failed to initialize "
"/proc/dri.\n");
goto fail;
return ret;
}
}
return 0;
fail:
}
for (j = 0; j < i; j++)
remove_proc_entry(drm_proc_list[i].name,
minor->dev_root);
remove_proc_entry(name, root);
minor->dev_root = NULL;
return ret;
int drm_proc_remove_files(struct drm_info_list *files, int count,
struct drm_minor *minor)
{
struct list_head *pos, *q;
struct drm_info_node *tmp;
int i;
for (i = 0; i < count; i++) {
list_for_each_safe(pos, q, &minor->proc_nodes.list) {
tmp = list_entry(pos, struct drm_info_node, list);
if (tmp->info_ent == &files[i]) {
remove_proc_entry(files[i].name,
minor->proc_root);
list_del(pos);
drm_free(tmp, sizeof(struct drm_info_node),
_DRM_DRIVER);
}
}
}
return 0;
}
/**
@ -164,570 +214,19 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
{
struct drm_device *dev = minor->dev;
int i;
char name[64];
if (!root || !minor->dev_root)
if (!root || !minor->proc_root)
return 0;
if (dev->driver->proc_cleanup)
dev->driver->proc_cleanup(minor);
for (i = 0; i < DRM_PROC_ENTRIES; i++)
remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
sprintf(name, "%d", minor->index);
remove_proc_entry(name, root);
return 0;
}
/**
* Called when "/proc/dri/.../name" is read.
*
* \param buf output buffer.
* \param start start of output data.
* \param offset requested start offset.
* \param request requested number of bytes.
* \param eof whether there is no more data to return.
* \param data private data.
* \return number of written bytes.
*
* Prints the device name together with the bus id if available.
*/
static int drm_name_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_master *master = minor->master;
struct drm_device *dev = minor->dev;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
if (!master)
return 0;
*start = &buf[offset];
*eof = 0;
if (master->unique) {
DRM_PROC_PRINT("%s %s %s\n",
dev->driver->pci_driver.name,
pci_name(dev->pdev), master->unique);
} else {
DRM_PROC_PRINT("%s %s\n", dev->driver->pci_driver.name,
pci_name(dev->pdev));
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
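/*
 * For reference, the legacy read_proc contract that the *_info handlers
 * removed by this patch all follow, reduced to a hedged sketch: honour
 * the offset/request window, point *start into the buffer, and set *eof
 * once the full output fits. example_info() is illustrative only.
 */
static int example_info(char *buf, char **start, off_t offset, int request,
			int *eof, void *data)
{
	int len = 0;

	if (offset > DRM_PROC_LIMIT) {
		*eof = 1;
		return 0;
	}
	*start = &buf[offset];
	*eof = 0;
	len += sprintf(&buf[len], "example\n");
	if (len > request + offset)
		return request;
	*eof = 1;
	return len - offset;
}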
/**
* Called when "/proc/dri/.../vm" is read.
*
* \param buf output buffer.
* \param start start of output data.
* \param offset requested start offset.
* \param request requested number of bytes.
* \param eof whether there is no more data to return.
* \param data private data.
* \return number of written bytes.
*
* Prints information about all mappings in drm_device::maplist.
*/
static int drm__vm_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int len = 0;
struct drm_map *map;
struct drm_map_list *r_list;
/* Hardcoded from _DRM_FRAME_BUFFER,
_DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
_DRM_SCATTER_GATHER and _DRM_CONSISTENT */
const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
const char *type;
int i;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("slot offset size type flags "
"address mtrr\n\n");
i = 0;
list_for_each_entry(r_list, &dev->maplist, head) {
map = r_list->map;
if (!map)
continue;
if (map->type < 0 || map->type > 5)
type = "??";
else
type = types[map->type];
DRM_PROC_PRINT("%4d 0x%08lx 0x%08lx %4.4s 0x%02x 0x%08lx ",
i,
map->offset,
map->size, type, map->flags,
(unsigned long) r_list->user_token);
if (map->mtrr < 0) {
DRM_PROC_PRINT("none\n");
} else {
DRM_PROC_PRINT("%4d\n", map->mtrr);
}
i++;
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
/**
* Simply calls _vm_info() while holding the drm_device::struct_mutex lock.
*/
static int drm_vm_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm__vm_info(buf, start, offset, request, eof, data);
mutex_unlock(&dev->struct_mutex);
return ret;
}
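/*
 * The locking-wrapper pattern repeated by each *_info pair in this file,
 * sketched on top of the hypothetical example_info() above: take
 * struct_mutex, call the unlocked helper, drop the lock.
 */
static int example_locked_info(char *buf, char **start, off_t offset,
			       int request, int *eof, void *data)
{
	struct drm_minor *minor = (struct drm_minor *) data;
	struct drm_device *dev = minor->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = example_info(buf, start, offset, request, eof, data);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}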
/**
* Called when "/proc/dri/.../queues" is read.
*
* \param buf output buffer.
* \param start start of output data.
* \param offset requested start offset.
* \param request requested number of bytes.
* \param eof whether there is no more data to return.
* \param data private data.
* \return number of written bytes.
*/
static int drm__queues_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int len = 0;
int i;
struct drm_queue *q;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT(" ctx/flags use fin"
" blk/rw/rwf wait flushed queued"
" locks\n\n");
for (i = 0; i < dev->queue_count; i++) {
q = dev->queuelist[i];
atomic_inc(&q->use_count);
DRM_PROC_PRINT_RET(atomic_dec(&q->use_count),
"%5d/0x%03x %5d %5d"
" %5d/%c%c/%c%c%c %5Zd\n",
i,
q->flags,
atomic_read(&q->use_count),
atomic_read(&q->finalization),
atomic_read(&q->block_count),
atomic_read(&q->block_read) ? 'r' : '-',
atomic_read(&q->block_write) ? 'w' : '-',
waitqueue_active(&q->read_queue) ? 'r' : '-',
waitqueue_active(&q->
write_queue) ? 'w' : '-',
waitqueue_active(&q->
flush_queue) ? 'f' : '-',
DRM_BUFCOUNT(&q->waitlist));
atomic_dec(&q->use_count);
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
/**
* Simply calls _queues_info() while holding the drm_device::struct_mutex lock.
*/
static int drm_queues_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm__queues_info(buf, start, offset, request, eof, data);
mutex_unlock(&dev->struct_mutex);
return ret;
}
/**
* Called when "/proc/dri/.../bufs" is read.
*
* \param buf output buffer.
* \param start start of output data.
* \param offset requested start offset.
* \param request requested number of bytes.
* \param eof whether there is no more data to return.
* \param data private data.
* \return number of written bytes.
*/
static int drm__bufs_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int len = 0;
struct drm_device_dma *dma = dev->dma;
int i;
if (!dma || offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT(" o size count free segs pages kB\n\n");
for (i = 0; i <= DRM_MAX_ORDER; i++) {
if (dma->bufs[i].buf_count)
DRM_PROC_PRINT("%2d %8d %5d %5d %5d %5d %5ld\n",
i,
dma->bufs[i].buf_size,
dma->bufs[i].buf_count,
atomic_read(&dma->bufs[i]
.freelist.count),
dma->bufs[i].seg_count,
dma->bufs[i].seg_count
* (1 << dma->bufs[i].page_order),
(dma->bufs[i].seg_count
* (1 << dma->bufs[i].page_order))
* PAGE_SIZE / 1024);
}
DRM_PROC_PRINT("\n");
for (i = 0; i < dma->buf_count; i++) {
if (i && !(i % 32))
DRM_PROC_PRINT("\n");
DRM_PROC_PRINT(" %d", dma->buflist[i]->list);
}
DRM_PROC_PRINT("\n");
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
/**
* Simply calls _bufs_info() while holding the drm_device::struct_mutex lock.
*/
static int drm_bufs_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm__bufs_info(buf, start, offset, request, eof, data);
mutex_unlock(&dev->struct_mutex);
return ret;
}
/**
* Called when "/proc/dri/.../vblank" is read.
*
* \param buf output buffer.
* \param start start of output data.
* \param offset requested start offset.
* \param request requested number of bytes.
* \param eof whether there is no more data to return.
* \param data private data.
* \return number of written bytes.
*/
static int drm__vblank_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int len = 0;
int crtc;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
for (crtc = 0; crtc < dev->num_crtcs; crtc++) {
DRM_PROC_PRINT("CRTC %d enable: %d\n",
crtc, atomic_read(&dev->vblank_refcount[crtc]));
DRM_PROC_PRINT("CRTC %d counter: %d\n",
crtc, drm_vblank_count(dev, crtc));
DRM_PROC_PRINT("CRTC %d last wait: %d\n",
crtc, dev->last_vblank_wait[crtc]);
DRM_PROC_PRINT("CRTC %d in modeset: %d\n",
crtc, dev->vblank_inmodeset[crtc]);
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
/**
* Simply calls _vblank_info() while holding the drm_device::struct_mutex lock.
*/
static int drm_vblank_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm__vblank_info(buf, start, offset, request, eof, data);
mutex_unlock(&dev->struct_mutex);
return ret;
}
/**
* Called when "/proc/dri/.../clients" is read.
*
* \param buf output buffer.
* \param start start of output data.
* \param offset requested start offset.
* \param request requested number of bytes.
* \param eof whether there is no more data to return.
* \param data private data.
* \return number of written bytes.
*/
static int drm__clients_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int len = 0;
struct drm_file *priv;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("a dev pid uid magic ioctls\n\n");
list_for_each_entry(priv, &dev->filelist, lhead) {
DRM_PROC_PRINT("%c %3d %5d %5d %10u %10lu\n",
priv->authenticated ? 'y' : 'n',
priv->minor->index,
priv->pid,
priv->uid, priv->magic, priv->ioctl_count);
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
/**
* Simply calls _clients_info() while holding the drm_device::struct_mutex lock.
*/
static int drm_clients_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm__clients_info(buf, start, offset, request, eof, data);
mutex_unlock(&dev->struct_mutex);
return ret;
}
struct drm_gem_name_info_data {
int len;
char *buf;
int eof;
};
static int drm_gem_one_name_info(int id, void *ptr, void *data)
{
struct drm_gem_object *obj = ptr;
struct drm_gem_name_info_data *nid = data;
DRM_INFO("name %d size %zd\n", obj->name, obj->size);
if (nid->eof)
return 0;
nid->len += sprintf(&nid->buf[nid->len],
"%6d %8zd %7d %8d\n",
obj->name, obj->size,
atomic_read(&obj->handlecount.refcount),
atomic_read(&obj->refcount.refcount));
if (nid->len > DRM_PROC_LIMIT) {
nid->eof = 1;
return 0;
}
return 0;
}
static int drm_gem_name_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
struct drm_gem_name_info_data nid;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
nid.len = sprintf(buf, " name size handles refcount\n");
nid.buf = buf;
nid.eof = 0;
idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
*start = &buf[offset];
*eof = 0;
if (nid.len > request + offset)
return request;
*eof = 1;
return nid.len - offset;
}
static int drm_gem_object_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
#if DRM_DEBUG_CODE
static int drm__vma_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int len = 0;
struct drm_vma_entry *pt;
struct vm_area_struct *vma;
#if defined(__i386__)
unsigned int pgprot;
#endif
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("vma use count: %d, high_memory = %p, 0x%llx\n",
atomic_read(&dev->vma_count),
high_memory, (u64)virt_to_phys(high_memory));
list_for_each_entry(pt, &dev->vmalist, head) {
if (!(vma = pt->vma))
continue;
DRM_PROC_PRINT("\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
pt->pid,
vma->vm_start,
vma->vm_end,
vma->vm_flags & VM_READ ? 'r' : '-',
vma->vm_flags & VM_WRITE ? 'w' : '-',
vma->vm_flags & VM_EXEC ? 'x' : '-',
vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
vma->vm_flags & VM_LOCKED ? 'l' : '-',
vma->vm_flags & VM_IO ? 'i' : '-',
vma->vm_pgoff);
#if defined(__i386__)
pgprot = pgprot_val(vma->vm_page_prot);
DRM_PROC_PRINT(" %c%c%c%c%c%c%c%c%c",
pgprot & _PAGE_PRESENT ? 'p' : '-',
pgprot & _PAGE_RW ? 'w' : 'r',
pgprot & _PAGE_USER ? 'u' : 's',
pgprot & _PAGE_PWT ? 't' : 'b',
pgprot & _PAGE_PCD ? 'u' : 'c',
pgprot & _PAGE_ACCESSED ? 'a' : '-',
pgprot & _PAGE_DIRTY ? 'd' : '-',
pgprot & _PAGE_PSE ? 'm' : 'k',
pgprot & _PAGE_GLOBAL ? 'g' : 'l');
#endif
DRM_PROC_PRINT("\n");
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int drm_vma_info(char *buf, char **start, off_t offset, int request,
int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
int ret;
mutex_lock(&dev->struct_mutex);
ret = drm__vma_info(buf, start, offset, request, eof, data);
mutex_unlock(&dev->struct_mutex);
return ret;
}
#endif

View File

@ -50,6 +50,7 @@ struct idr drm_minors_idr;
struct class *drm_class;
struct proc_dir_entry *drm_proc_root;
struct dentry *drm_debugfs_root;
static int drm_minor_get_id(struct drm_device *dev, int type)
{
@ -313,7 +314,15 @@ static int drm_get_minor(struct drm_device *dev, struct drm_minor **minor, int t
goto err_mem;
}
} else
new_minor->dev_root = NULL;
new_minor->proc_root = NULL;
#if defined(CONFIG_DEBUG_FS)
ret = drm_debugfs_init(new_minor, minor_id, drm_debugfs_root);
if (ret) {
DRM_ERROR("DRM: Failed to initialize /debugfs/dri.\n");
goto err_g2;
}
#endif
ret = drm_sysfs_device_add(new_minor);
if (ret) {
@ -451,6 +460,10 @@ int drm_put_minor(struct drm_minor **minor_p)
if (minor->type == DRM_MINOR_LEGACY)
drm_proc_cleanup(minor, drm_proc_root);
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_cleanup(minor);
#endif
drm_sysfs_device_remove(minor);
idr_remove(&drm_minors_idr, minor->index);

View File

@ -7,7 +7,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_suspend.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_proc.o \
i915_gem_debugfs.o \
i915_gem_tiling.o \
intel_display.o \
intel_crt.o \

View File

@ -41,7 +41,6 @@
int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
u32 last_acthd = I915_READ(acthd_reg);
@ -58,8 +57,12 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
if (ring->space >= n)
return 0;
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
if (dev->primary->master) {
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
}
if (ring->head != last_head)
i = 0;
@ -356,7 +359,7 @@ static int validate_cmd(int cmd)
return ret;
}
static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwords)
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
@ -370,8 +373,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
for (i = 0; i < dwords;) {
int cmd, sz;
if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i], sizeof(cmd)))
return -EINVAL;
cmd = buffer[i];
if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
return -EINVAL;
@ -379,11 +381,7 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
OUT_RING(cmd);
while (++i, --sz) {
if (DRM_COPY_FROM_USER_UNCHECKED(&cmd, &buffer[i],
sizeof(cmd))) {
return -EINVAL;
}
OUT_RING(cmd);
OUT_RING(buffer[i]);
}
}
@ -397,17 +395,13 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
int
i915_emit_box(struct drm_device *dev,
struct drm_clip_rect __user *boxes,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_clip_rect box;
struct drm_clip_rect box = boxes[i];
RING_LOCALS;
if (DRM_COPY_FROM_USER_UNCHECKED(&box, &boxes[i], sizeof(box))) {
return -EFAULT;
}
if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
box.x1, box.y1, box.x2, box.y2);
@ -460,7 +454,9 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
drm_i915_cmdbuffer_t * cmd)
drm_i915_cmdbuffer_t *cmd,
struct drm_clip_rect *cliprects,
void *cmdbuf)
{
int nbox = cmd->num_cliprects;
int i = 0, count, ret;
@ -476,13 +472,13 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
for (i = 0; i < count; i++) {
if (i < nbox) {
ret = i915_emit_box(dev, cmd->cliprects, i,
ret = i915_emit_box(dev, cliprects, i,
cmd->DR1, cmd->DR4);
if (ret)
return ret;
}
ret = i915_emit_cmds(dev, (int __user *)cmd->buf, cmd->sz / 4);
ret = i915_emit_cmds(dev, cmdbuf, cmd->sz / 4);
if (ret)
return ret;
}
@ -492,10 +488,10 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
}
static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch)
drm_i915_batchbuffer_t * batch,
struct drm_clip_rect *cliprects)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_clip_rect __user *boxes = batch->cliprects;
int nbox = batch->num_cliprects;
int i = 0, count;
RING_LOCALS;
@ -511,7 +507,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
for (i = 0; i < count; i++) {
if (i < nbox) {
int ret = i915_emit_box(dev, boxes, i,
int ret = i915_emit_box(dev, cliprects, i,
batch->DR1, batch->DR4);
if (ret)
return ret;
@ -626,6 +622,7 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
master_priv->sarea_priv;
drm_i915_batchbuffer_t *batch = data;
int ret;
struct drm_clip_rect *cliprects = NULL;
if (!dev_priv->allow_batchbuffer) {
DRM_ERROR("Batchbuffer ioctl disabled\n");
@ -637,17 +634,35 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect)))
return -EFAULT;
if (batch->num_cliprects < 0)
return -EINVAL;
if (batch->num_cliprects) {
cliprects = drm_calloc(batch->num_cliprects,
sizeof(struct drm_clip_rect),
DRM_MEM_DRIVER);
if (cliprects == NULL)
return -ENOMEM;
ret = copy_from_user(cliprects, batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect));
if (ret != 0)
goto fail_free;
}
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_batchbuffer(dev, batch);
ret = i915_dispatch_batchbuffer(dev, batch, cliprects);
mutex_unlock(&dev->struct_mutex);
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
fail_free:
drm_free(cliprects,
batch->num_cliprects * sizeof(struct drm_clip_rect),
DRM_MEM_DRIVER);
return ret;
}
@ -659,6 +674,8 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
drm_i915_sarea_t *sarea_priv = (drm_i915_sarea_t *)
master_priv->sarea_priv;
drm_i915_cmdbuffer_t *cmdbuf = data;
struct drm_clip_rect *cliprects = NULL;
void *batch_data;
int ret;
DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
@ -666,25 +683,50 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (cmdbuf->num_cliprects &&
DRM_VERIFYAREA_READ(cmdbuf->cliprects,
cmdbuf->num_cliprects *
sizeof(struct drm_clip_rect))) {
DRM_ERROR("Fault accessing cliprects\n");
return -EFAULT;
if (cmdbuf->num_cliprects < 0)
return -EINVAL;
batch_data = drm_alloc(cmdbuf->sz, DRM_MEM_DRIVER);
if (batch_data == NULL)
return -ENOMEM;
ret = copy_from_user(batch_data, cmdbuf->buf, cmdbuf->sz);
if (ret != 0)
goto fail_batch_free;
if (cmdbuf->num_cliprects) {
cliprects = drm_calloc(cmdbuf->num_cliprects,
sizeof(struct drm_clip_rect),
DRM_MEM_DRIVER);
		if (cliprects == NULL) {
			ret = -ENOMEM;
			goto fail_batch_free;
		}
ret = copy_from_user(cliprects, cmdbuf->cliprects,
cmdbuf->num_cliprects *
sizeof(struct drm_clip_rect));
if (ret != 0)
goto fail_clip_free;
}
mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
ret = i915_dispatch_cmdbuffer(dev, cmdbuf, cliprects, batch_data);
mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
return ret;
		goto fail_clip_free;
}
if (sarea_priv)
sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
return 0;
fail_clip_free:
	drm_free(cliprects,
		 cmdbuf->num_cliprects * sizeof(struct drm_clip_rect),
		 DRM_MEM_DRIVER);
fail_batch_free:
	drm_free(batch_data, cmdbuf->sz, DRM_MEM_DRIVER);
return ret;
}
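/*
 * The unwind idiom used by the two ioctls above, reduced to a hedged
 * sketch (names hypothetical): failure labels run in reverse allocation
 * order, so an early failure falls through only the frees it needs, and
 * the success path falls through all of them with ret == 0. drm_free()
 * tolerates a NULL pointer, like kfree().
 */
static int example_copy_two(void __user *ua, size_t a_sz,
			    void __user *ub, size_t b_sz)
{
	void *a, *b = NULL;
	int ret = 0;

	a = drm_alloc(a_sz, DRM_MEM_DRIVER);
	if (a == NULL)
		return -ENOMEM;
	if (copy_from_user(a, ua, a_sz)) {
		ret = -EFAULT;
		goto fail_a_free;
	}
	b = drm_alloc(b_sz, DRM_MEM_DRIVER);
	if (b == NULL) {
		ret = -ENOMEM;
		goto fail_a_free;
	}
	if (copy_from_user(b, ub, b_sz))
		ret = -EFAULT;

	/* success (ret == 0) falls through both frees as well */
	drm_free(b, b_sz, DRM_MEM_DRIVER);
fail_a_free:
	drm_free(a, a_sz, DRM_MEM_DRIVER);
	return ret;
}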
static int i915_flip_bufs(struct drm_device *dev, void *data,

View File

@ -150,8 +150,10 @@ static struct drm_driver driver = {
.get_reg_ofs = drm_core_get_reg_ofs,
.master_create = i915_master_create,
.master_destroy = i915_master_destroy,
.proc_init = i915_gem_proc_init,
.proc_cleanup = i915_gem_proc_cleanup,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = i915_gem_debugfs_init,
.debugfs_cleanup = i915_gem_debugfs_cleanup,
#endif
.gem_init_object = i915_gem_init_object,
.gem_free_object = i915_gem_free_object,
.gem_vm_ops = &i915_gem_vm_ops,

View File

@ -404,7 +404,8 @@ struct drm_i915_gem_object {
/** AGP memory structure for our GTT binding. */
DRM_AGP_MEM *agp_mem;
struct page **page_list;
struct page **pages;
int pages_refcount;
/**
* Current offset of the object in GTT space.
@ -519,7 +520,7 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
extern int i915_emit_box(struct drm_device *dev,
struct drm_clip_rect __user *boxes,
struct drm_clip_rect *boxes,
int i, int DR1, int DR4);
/* i915_irq.c */
@ -604,8 +605,6 @@ int i915_gem_get_tiling(struct drm_device *dev, void *data,
int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_proc_init(struct drm_minor *minor);
void i915_gem_proc_cleanup(struct drm_minor *minor);
int i915_gem_init_object(struct drm_gem_object *obj);
void i915_gem_free_object(struct drm_gem_object *obj);
int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
@ -649,6 +648,10 @@ void i915_gem_dump_object(struct drm_gem_object *obj, int len,
const char *where, uint32_t mark);
void i915_dump_lru(struct drm_device *dev, const char *where);
/* i915_debugfs.c */
int i915_gem_debugfs_init(struct drm_minor *minor);
void i915_gem_debugfs_cleanup(struct drm_minor *minor);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
@ -784,15 +787,21 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
(dev)->pci_device == 0x2E22 || \
IS_GM45(dev))
#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
#define IS_G33(dev) ((dev)->pci_device == 0x29C2 || \
(dev)->pci_device == 0x29B2 || \
(dev)->pci_device == 0x29D2)
(dev)->pci_device == 0x29D2 || \
(IS_IGD(dev)))
#define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
#define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
IS_IGD(dev))
#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte

File diff suppressed because it is too large

View File

@ -0,0 +1,257 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Keith Packard <keithp@keithp.com>
*
*/
#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#define DRM_I915_RING_DEBUG 1
#if defined(CONFIG_DEBUG_FS)
#define ACTIVE_LIST 1
#define FLUSHING_LIST 2
#define INACTIVE_LIST 3
static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
{
if (obj_priv->user_pin_count > 0)
return "P";
else if (obj_priv->pin_count > 0)
return "p";
else
return " ";
}
static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
{
switch (obj_priv->tiling_mode) {
default:
case I915_TILING_NONE: return " ";
case I915_TILING_X: return "X";
case I915_TILING_Y: return "Y";
}
}
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
uintptr_t list = (uintptr_t) node->info_ent->data;
struct list_head *head;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
switch (list) {
case ACTIVE_LIST:
seq_printf(m, "Active:\n");
head = &dev_priv->mm.active_list;
break;
case INACTIVE_LIST:
seq_printf(m, "Inctive:\n");
head = &dev_priv->mm.inactive_list;
break;
case FLUSHING_LIST:
seq_printf(m, "Flushing:\n");
head = &dev_priv->mm.flushing_list;
break;
default:
DRM_INFO("Ooops, unexpected list\n");
return 0;
}
list_for_each_entry(obj_priv, head, list)
{
struct drm_gem_object *obj = obj_priv->obj;
seq_printf(m, " %p: %s %08x %08x %d",
obj,
get_pin_flag(obj_priv),
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
if (obj->name)
seq_printf(m, " (name: %d)", obj->name);
if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d\n", obj_priv->fence_reg);
seq_printf(m, "\n");
}
return 0;
}
static int i915_gem_request_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
seq_printf(m, "Request:\n");
list_for_each_entry(gem_request, &dev_priv->mm.request_list, list) {
seq_printf(m, " %d @ %d\n",
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies));
}
return 0;
}
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
if (dev_priv->hw_status_page != NULL) {
seq_printf(m, "Current sequence: %d\n",
i915_get_gem_seqno(dev));
} else {
seq_printf(m, "Current sequence: hws uninitialized\n");
}
seq_printf(m, "Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
return 0;
}
static int i915_interrupt_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
seq_printf(m, "Interrupt enable: %08x\n",
I915_READ(IER));
seq_printf(m, "Interrupt identity: %08x\n",
I915_READ(IIR));
seq_printf(m, "Interrupt mask: %08x\n",
I915_READ(IMR));
seq_printf(m, "Pipe A stat: %08x\n",
I915_READ(PIPEASTAT));
seq_printf(m, "Pipe B stat: %08x\n",
I915_READ(PIPEBSTAT));
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
if (dev_priv->hw_status_page != NULL) {
seq_printf(m, "Current sequence: %d\n",
i915_get_gem_seqno(dev));
} else {
seq_printf(m, "Current sequence: hws uninitialized\n");
}
seq_printf(m, "Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
seq_printf(m, "IRQ sequence: %d\n",
dev_priv->mm.irq_gem_seqno);
return 0;
}
static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
for (i = 0; i < dev_priv->num_fence_regs; i++) {
struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
if (obj == NULL) {
seq_printf(m, "Fenced object[%2d] = unused\n", i);
} else {
struct drm_i915_gem_object *obj_priv;
obj_priv = obj->driver_private;
seq_printf(m, "Fenced object[%2d] = %p: %s "
"%08x %08zx %08x %s %08x %08x %d",
i, obj, get_pin_flag(obj_priv),
obj_priv->gtt_offset,
obj->size, obj_priv->stride,
get_tiling_flag(obj_priv),
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
if (obj->name)
seq_printf(m, " (name: %d)", obj->name);
seq_printf(m, "\n");
}
}
return 0;
}
static int i915_hws_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int i;
volatile u32 *hws;
hws = (volatile u32 *)dev_priv->hw_status_page;
if (hws == NULL)
return 0;
for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
seq_printf(m, "0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
i * 4,
hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
}
return 0;
}
static struct drm_info_list i915_gem_debugfs_list[] = {
{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
{"i915_gem_request", i915_gem_request_info, 0},
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
{"i915_gem_hws", i915_hws_info, 0},
};
#define I915_GEM_DEBUGFS_ENTRIES ARRAY_SIZE(i915_gem_debugfs_list)
int i915_gem_debugfs_init(struct drm_minor *minor)
{
return drm_debugfs_create_files(i915_gem_debugfs_list,
I915_GEM_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
}
void i915_gem_debugfs_cleanup(struct drm_minor *minor)
{
drm_debugfs_remove_files(i915_gem_debugfs_list,
I915_GEM_DEBUGFS_ENTRIES, minor);
}
#endif /* CONFIG_DEBUG_FS */
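/*
 * Hedged userspace sketch of how the files registered above are read once
 * debugfs is mounted at the conventional /sys/kernel/debug; the mount
 * point and the minor number 0 are assumptions, not part of this patch.
 */
#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/kernel/debug/dri/0/i915_gem_seqno", "r");

	if (f == NULL)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}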

View File

@ -1,334 +0,0 @@
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <eric@anholt.net>
* Keith Packard <keithp@keithp.com>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
static int i915_gem_active_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Active:\n");
list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
list)
{
struct drm_gem_object *obj = obj_priv->obj;
if (obj->name) {
DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
obj, obj->name,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
} else {
DRM_PROC_PRINT(" %p: %08x %08x %d\n",
obj,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
}
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Flushing:\n");
list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
list)
{
struct drm_gem_object *obj = obj_priv->obj;
if (obj->name) {
DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
obj, obj->name,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
} else {
DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
}
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj_priv;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Inactive:\n");
list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
list)
{
struct drm_gem_object *obj = obj_priv->obj;
if (obj->name) {
DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n",
obj, obj->name,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
} else {
DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj,
obj->read_domains, obj->write_domain,
obj_priv->last_rendering_seqno);
}
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_request_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Request:\n");
list_for_each_entry(gem_request, &dev_priv->mm.request_list,
list)
{
DRM_PROC_PRINT(" %d @ %d\n",
gem_request->seqno,
(int) (jiffies - gem_request->emitted_jiffies));
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
if (dev_priv->hw_status_page != NULL) {
DRM_PROC_PRINT("Current sequence: %d\n",
i915_get_gem_seqno(dev));
} else {
DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
}
DRM_PROC_PRINT("Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_interrupt_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int len = 0;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
DRM_PROC_PRINT("Interrupt enable: %08x\n",
I915_READ(IER));
DRM_PROC_PRINT("Interrupt identity: %08x\n",
I915_READ(IIR));
DRM_PROC_PRINT("Interrupt mask: %08x\n",
I915_READ(IMR));
DRM_PROC_PRINT("Pipe A stat: %08x\n",
I915_READ(PIPEASTAT));
DRM_PROC_PRINT("Pipe B stat: %08x\n",
I915_READ(PIPEBSTAT));
DRM_PROC_PRINT("Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
if (dev_priv->hw_status_page != NULL) {
DRM_PROC_PRINT("Current sequence: %d\n",
i915_get_gem_seqno(dev));
} else {
DRM_PROC_PRINT("Current sequence: hws uninitialized\n");
}
DRM_PROC_PRINT("Waiter sequence: %d\n",
dev_priv->mm.waiting_gem_seqno);
DRM_PROC_PRINT("IRQ sequence: %d\n",
dev_priv->mm.irq_gem_seqno);
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static int i915_hws_info(char *buf, char **start, off_t offset,
int request, int *eof, void *data)
{
struct drm_minor *minor = (struct drm_minor *) data;
struct drm_device *dev = minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
int len = 0, i;
volatile u32 *hws;
if (offset > DRM_PROC_LIMIT) {
*eof = 1;
return 0;
}
hws = (volatile u32 *)dev_priv->hw_status_page;
if (hws == NULL) {
*eof = 1;
return 0;
}
*start = &buf[offset];
*eof = 0;
for (i = 0; i < 4096 / sizeof(u32) / 4; i += 4) {
DRM_PROC_PRINT("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
i * 4,
hws[i], hws[i + 1], hws[i + 2], hws[i + 3]);
}
if (len > request + offset)
return request;
*eof = 1;
return len - offset;
}
static struct drm_proc_list {
/** file name */
const char *name;
/** proc callback*/
int (*f) (char *, char **, off_t, int, int *, void *);
} i915_gem_proc_list[] = {
{"i915_gem_active", i915_gem_active_info},
{"i915_gem_flushing", i915_gem_flushing_info},
{"i915_gem_inactive", i915_gem_inactive_info},
{"i915_gem_request", i915_gem_request_info},
{"i915_gem_seqno", i915_gem_seqno_info},
{"i915_gem_interrupt", i915_interrupt_info},
{"i915_gem_hws", i915_hws_info},
};
#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
int i915_gem_proc_init(struct drm_minor *minor)
{
struct proc_dir_entry *ent;
int i, j;
for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
ent = create_proc_entry(i915_gem_proc_list[i].name,
S_IFREG | S_IRUGO, minor->dev_root);
if (!ent) {
DRM_ERROR("Cannot create /proc/dri/.../%s\n",
i915_gem_proc_list[i].name);
for (j = 0; j < i; j++)
remove_proc_entry(i915_gem_proc_list[i].name,
minor->dev_root);
return -1;
}
ent->read_proc = i915_gem_proc_list[i].f;
ent->data = minor;
}
return 0;
}
void i915_gem_proc_cleanup(struct drm_minor *minor)
{
int i;
if (!minor->dev_root)
return;
for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
}

View File

@ -96,16 +96,16 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
*/
swizzle_x = I915_BIT_6_SWIZZLE_NONE;
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
} else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) ||
IS_GM45(dev)) {
} else if (IS_MOBILE(dev)) {
uint32_t dcc;
/* On 915-945 and GM965, channel interleave by the CPU is
* determined by DCC. The CPU will alternate based on bit 6
* in interleaved mode, and the GPU will then also alternate
* on bit 6, 9, and 10 for X, but the CPU may also optionally
* alternate based on bit 17 (XOR not disabled and XOR
* bit == 17).
/* On mobile 9xx chipsets, channel interleave by the CPU is
* determined by DCC. For single-channel, neither the CPU
* nor the GPU do swizzling. For dual channel interleaved,
* the GPU's interleave is bit 9 and 10 for X tiled, and bit
* 9 for Y tiled. The CPU's interleave is independent, and
* can be based on either bit 11 (haven't seen this yet) or
* bit 17 (common).
*/
dcc = I915_READ(DCC);
switch (dcc & DCC_ADDRESSING_MODE_MASK) {
@ -115,19 +115,18 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
swizzle_y = I915_BIT_6_SWIZZLE_NONE;
break;
case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
if (IS_I915G(dev) || IS_I915GM(dev) ||
dcc & DCC_CHANNEL_XOR_DISABLE) {
if (dcc & DCC_CHANNEL_XOR_DISABLE) {
/* This is the base swizzling by the GPU for
* tiled buffers.
*/
swizzle_x = I915_BIT_6_SWIZZLE_9_10;
swizzle_y = I915_BIT_6_SWIZZLE_9;
} else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
(dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
/* GM965/GM45 does either bit 11 or bit 17
* swizzling.
*/
} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
/* Bit 11 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
swizzle_y = I915_BIT_6_SWIZZLE_9_11;
} else {
/* Bit 17 or perhaps other swizzling */
/* Bit 17 swizzling by the CPU in addition. */
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
}
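/*
 * Assumed interpretation (not part of this patch) of what a swizzle mode
 * means for CPU access to tiled memory: for I915_BIT_6_SWIZZLE_9_10,
 * bit 6 of the linear address is flipped by the XOR of bits 9 and 10;
 * the _9_11 and _9_10_11 variants fold in bit 11 the same way.
 */
static unsigned long example_swizzle_bit_6_9_10(unsigned long addr)
{
	return addr ^ ((((addr >> 9) ^ (addr >> 10)) & 1) << 6);
}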

View File

@ -359,6 +359,7 @@
#define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */
#define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */
#define DPLL_FPA01_P1_POST_DIV_MASK_IGD 0x00ff8000 /* IGD */
#define I915_FIFO_UNDERRUN_STATUS (1UL<<31)
#define I915_CRC_ERROR_ENABLE (1UL<<29)
@ -435,6 +436,7 @@
*/
#define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000
#define DPLL_FPA01_P1_POST_DIV_SHIFT 16
#define DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
/* i830, required in DVO non-gang */
#define PLL_P2_DIVIDE_BY_4 (1 << 23)
#define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */
@ -501,10 +503,12 @@
#define FPB0 0x06048
#define FPB1 0x0604c
#define FP_N_DIV_MASK 0x003f0000
#define FP_N_IGD_DIV_MASK 0x00ff0000
#define FP_N_DIV_SHIFT 16
#define FP_M1_DIV_MASK 0x00003f00
#define FP_M1_DIV_SHIFT 8
#define FP_M2_DIV_MASK 0x0000003f
#define FP_M2_IGD_DIV_MASK 0x000000ff
#define FP_M2_DIV_SHIFT 0
#define DPLL_TEST 0x606c
#define DPLLB_TEST_SDVO_DIV_1 (0 << 22)
@ -629,6 +633,22 @@
#define TV_HOTPLUG_INT_EN (1 << 18)
#define CRT_HOTPLUG_INT_EN (1 << 9)
#define CRT_HOTPLUG_FORCE_DETECT (1 << 3)
#define CRT_HOTPLUG_ACTIVATION_PERIOD_32 (0 << 8)
/* must use period 64 on GM45 according to docs */
#define CRT_HOTPLUG_ACTIVATION_PERIOD_64 (1 << 8)
#define CRT_HOTPLUG_DAC_ON_TIME_2M (0 << 7)
#define CRT_HOTPLUG_DAC_ON_TIME_4M (1 << 7)
#define CRT_HOTPLUG_VOLTAGE_COMPARE_40 (0 << 5)
#define CRT_HOTPLUG_VOLTAGE_COMPARE_50 (1 << 5)
#define CRT_HOTPLUG_VOLTAGE_COMPARE_60 (2 << 5)
#define CRT_HOTPLUG_VOLTAGE_COMPARE_70 (3 << 5)
#define CRT_HOTPLUG_VOLTAGE_COMPARE_MASK (3 << 5)
#define CRT_HOTPLUG_DETECT_DELAY_1G (0 << 4)
#define CRT_HOTPLUG_DETECT_DELAY_2G (1 << 4)
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define CRT_HOTPLUG_MASK (0x3fc) /* Bits 9-2 */
#define PORT_HOTPLUG_STAT 0x61114
#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
@ -856,7 +876,7 @@
*/
# define TV_ENC_C0_FIX (1 << 10)
/** Bits that must be preserved by software */
# define TV_CTL_SAVE ((3 << 8) | (3 << 6))
# define TV_CTL_SAVE ((1 << 11) | (3 << 9) | (7 << 6) | 0xf)
# define TV_FUSE_STATE_MASK (3 << 4)
/** Read-only state that reports all features enabled */
# define TV_FUSE_STATE_ENABLED (0 << 4)

View File

@ -162,13 +162,13 @@ struct bdb_lvds_options {
u8 panel_type;
u8 rsvd1;
/* LVDS capabilities, stored in a dword */
u8 rsvd2:1;
u8 lvds_edid:1;
u8 pixel_dither:1;
u8 pfit_ratio_auto:1;
u8 pfit_gfx_mode_enhanced:1;
u8 pfit_text_mode_enhanced:1;
u8 pfit_mode:2;
u8 pfit_text_mode_enhanced:1;
u8 pfit_gfx_mode_enhanced:1;
u8 pfit_ratio_auto:1;
u8 pixel_dither:1;
u8 lvds_edid:1;
u8 rsvd2:1;
u8 rsvd4;
} __attribute__((packed));
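/*
 * Why the reordering above matters (illustrative, userspace-style check,
 * not part of this patch): GCC on little-endian x86 allocates the first
 * bit-field declared into the least significant bits, so pfit_mode must
 * be declared first to land in bits 1:0 of the capabilities byte the way
 * the VBT defines it.
 */
#include <assert.h>

static void example_check_lvds_bitfield_order(void)
{
	struct bdb_lvds_options opt = { .pfit_mode = 0x3 };

	/* panel_type and rsvd1 fill bytes 0-1; the bit-fields are byte 2 */
	assert((((unsigned char *)&opt)[2] & 0x03) == 0x03);
}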

View File

@ -64,11 +64,21 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
static int intel_crt_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct drm_device *dev = connector->dev;
int max_clock = 0;
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
if (mode->clock > 400000 || mode->clock < 25000)
return MODE_CLOCK_RANGE;
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
if (!IS_I9XX(dev))
max_clock = 350000;
else
max_clock = 400000;
if (mode->clock > max_clock)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
@ -113,10 +123,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
adpa |= ADPA_VSYNC_ACTIVE_HIGH;
if (intel_crtc->pipe == 0)
if (intel_crtc->pipe == 0) {
adpa |= ADPA_PIPE_A_SELECT;
else
I915_WRITE(BCLRPAT_A, 0);
} else {
adpa |= ADPA_PIPE_B_SELECT;
I915_WRITE(BCLRPAT_B, 0);
}
I915_WRITE(ADPA, adpa);
}
@ -133,20 +146,39 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
u32 temp;
u32 hotplug_en;
int i, tries = 0;
/*
	 * On 4-series desktop chipsets, the CRT detect sequence needs to be
	 * run twice
* to get a reliable result.
*/
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
if (IS_G4X(dev) && !IS_GM45(dev))
tries = 2;
else
tries = 1;
hotplug_en = I915_READ(PORT_HOTPLUG_EN);
hotplug_en &= ~(CRT_HOTPLUG_MASK);
hotplug_en |= CRT_HOTPLUG_FORCE_DETECT;
temp = I915_READ(PORT_HOTPLUG_EN);
if (IS_GM45(dev))
hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64;
I915_WRITE(PORT_HOTPLUG_EN,
temp | CRT_HOTPLUG_FORCE_DETECT | (1 << 5));
hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50;
do {
if (!(I915_READ(PORT_HOTPLUG_EN) & CRT_HOTPLUG_FORCE_DETECT))
break;
msleep(1);
} while (time_after(timeout, jiffies));
for (i = 0; i < tries ; i++) {
unsigned long timeout;
/* turn on the FORCE_DETECT */
I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
timeout = jiffies + msecs_to_jiffies(1000);
/* wait for FORCE_DETECT to go off */
do {
if (!(I915_READ(PORT_HOTPLUG_EN) &
CRT_HOTPLUG_FORCE_DETECT))
break;
msleep(1);
} while (time_after(timeout, jiffies));
}
if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
CRT_HOTPLUG_MONITOR_COLOR)

View File

@ -56,11 +56,13 @@ typedef struct {
} intel_p2_t;
#define INTEL_P2_NUM 2
typedef struct {
typedef struct intel_limit intel_limit_t;
struct intel_limit {
intel_range_t dot, vco, n, m, m1, m2, p, p1;
intel_p2_t p2;
} intel_limit_t;
bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
int, int, intel_clock_t *);
};
#define I8XX_DOT_MIN 25000
#define I8XX_DOT_MAX 350000
@ -90,18 +92,32 @@ typedef struct {
#define I9XX_DOT_MAX 400000
#define I9XX_VCO_MIN 1400000
#define I9XX_VCO_MAX 2800000
#define IGD_VCO_MIN 1700000
#define IGD_VCO_MAX 3500000
#define I9XX_N_MIN 1
#define I9XX_N_MAX 6
/* IGD's N counter is a ring counter */
#define IGD_N_MIN 3
#define IGD_N_MAX 6
#define I9XX_M_MIN 70
#define I9XX_M_MAX 120
#define IGD_M_MIN 2
#define IGD_M_MAX 256
#define I9XX_M1_MIN 10
#define I9XX_M1_MAX 22
#define I9XX_M2_MIN 5
#define I9XX_M2_MAX 9
/* IGD M1 is reserved, and must be 0 */
#define IGD_M1_MIN 0
#define IGD_M1_MAX 0
#define IGD_M2_MIN 0
#define IGD_M2_MAX 254
#define I9XX_P_SDVO_DAC_MIN 5
#define I9XX_P_SDVO_DAC_MAX 80
#define I9XX_P_LVDS_MIN 7
#define I9XX_P_LVDS_MAX 98
#define IGD_P_LVDS_MIN 7
#define IGD_P_LVDS_MAX 112
#define I9XX_P1_MIN 1
#define I9XX_P1_MAX 8
#define I9XX_P2_SDVO_DAC_SLOW 10
@ -115,6 +131,97 @@ typedef struct {
#define INTEL_LIMIT_I8XX_LVDS 1
#define INTEL_LIMIT_I9XX_SDVO_DAC 2
#define INTEL_LIMIT_I9XX_LVDS 3
#define INTEL_LIMIT_G4X_SDVO 4
#define INTEL_LIMIT_G4X_HDMI_DAC 5
#define INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS 6
#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
#define INTEL_LIMIT_IGD_SDVO_DAC 8
#define INTEL_LIMIT_IGD_LVDS 9
/* The parameters below are for SDVO on the G4x platform */
#define G4X_DOT_SDVO_MIN 25000
#define G4X_DOT_SDVO_MAX 270000
#define G4X_VCO_MIN 1750000
#define G4X_VCO_MAX 3500000
#define G4X_N_SDVO_MIN 1
#define G4X_N_SDVO_MAX 4
#define G4X_M_SDVO_MIN 104
#define G4X_M_SDVO_MAX 138
#define G4X_M1_SDVO_MIN 17
#define G4X_M1_SDVO_MAX 23
#define G4X_M2_SDVO_MIN 5
#define G4X_M2_SDVO_MAX 11
#define G4X_P_SDVO_MIN 10
#define G4X_P_SDVO_MAX 30
#define G4X_P1_SDVO_MIN 1
#define G4X_P1_SDVO_MAX 3
#define G4X_P2_SDVO_SLOW 10
#define G4X_P2_SDVO_FAST 10
#define G4X_P2_SDVO_LIMIT 270000
/* The parameters below are for HDMI_DAC on the G4x platform */
#define G4X_DOT_HDMI_DAC_MIN 22000
#define G4X_DOT_HDMI_DAC_MAX 400000
#define G4X_N_HDMI_DAC_MIN 1
#define G4X_N_HDMI_DAC_MAX 4
#define G4X_M_HDMI_DAC_MIN 104
#define G4X_M_HDMI_DAC_MAX 138
#define G4X_M1_HDMI_DAC_MIN 16
#define G4X_M1_HDMI_DAC_MAX 23
#define G4X_M2_HDMI_DAC_MIN 5
#define G4X_M2_HDMI_DAC_MAX 11
#define G4X_P_HDMI_DAC_MIN 5
#define G4X_P_HDMI_DAC_MAX 80
#define G4X_P1_HDMI_DAC_MIN 1
#define G4X_P1_HDMI_DAC_MAX 8
#define G4X_P2_HDMI_DAC_SLOW 10
#define G4X_P2_HDMI_DAC_FAST 5
#define G4X_P2_HDMI_DAC_LIMIT 165000
/* The parameters below are for SINGLE_CHANNEL_LVDS on the G4x platform */
#define G4X_DOT_SINGLE_CHANNEL_LVDS_MIN 20000
#define G4X_DOT_SINGLE_CHANNEL_LVDS_MAX 115000
#define G4X_N_SINGLE_CHANNEL_LVDS_MIN 1
#define G4X_N_SINGLE_CHANNEL_LVDS_MAX 3
#define G4X_M_SINGLE_CHANNEL_LVDS_MIN 104
#define G4X_M_SINGLE_CHANNEL_LVDS_MAX 138
#define G4X_M1_SINGLE_CHANNEL_LVDS_MIN 17
#define G4X_M1_SINGLE_CHANNEL_LVDS_MAX 23
#define G4X_M2_SINGLE_CHANNEL_LVDS_MIN 5
#define G4X_M2_SINGLE_CHANNEL_LVDS_MAX 11
#define G4X_P_SINGLE_CHANNEL_LVDS_MIN 28
#define G4X_P_SINGLE_CHANNEL_LVDS_MAX 112
#define G4X_P1_SINGLE_CHANNEL_LVDS_MIN 2
#define G4X_P1_SINGLE_CHANNEL_LVDS_MAX 8
#define G4X_P2_SINGLE_CHANNEL_LVDS_SLOW 14
#define G4X_P2_SINGLE_CHANNEL_LVDS_FAST 14
#define G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT 0
/* The parameters below are for DUAL_CHANNEL_LVDS on the G4x platform */
#define G4X_DOT_DUAL_CHANNEL_LVDS_MIN 80000
#define G4X_DOT_DUAL_CHANNEL_LVDS_MAX 224000
#define G4X_N_DUAL_CHANNEL_LVDS_MIN 1
#define G4X_N_DUAL_CHANNEL_LVDS_MAX 3
#define G4X_M_DUAL_CHANNEL_LVDS_MIN 104
#define G4X_M_DUAL_CHANNEL_LVDS_MAX 138
#define G4X_M1_DUAL_CHANNEL_LVDS_MIN 17
#define G4X_M1_DUAL_CHANNEL_LVDS_MAX 23
#define G4X_M2_DUAL_CHANNEL_LVDS_MIN 5
#define G4X_M2_DUAL_CHANNEL_LVDS_MAX 11
#define G4X_P_DUAL_CHANNEL_LVDS_MIN 14
#define G4X_P_DUAL_CHANNEL_LVDS_MAX 42
#define G4X_P1_DUAL_CHANNEL_LVDS_MIN 2
#define G4X_P1_DUAL_CHANNEL_LVDS_MAX 6
#define G4X_P2_DUAL_CHANNEL_LVDS_SLOW 7
#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock);
static const intel_limit_t intel_limits[] = {
{ /* INTEL_LIMIT_I8XX_DVO_DAC */
@ -128,6 +235,7 @@ static const intel_limit_t intel_limits[] = {
.p1 = { .min = I8XX_P1_MIN, .max = I8XX_P1_MAX },
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_SLOW, .p2_fast = I8XX_P2_FAST },
.find_pll = intel_find_best_PLL,
},
{ /* INTEL_LIMIT_I8XX_LVDS */
.dot = { .min = I8XX_DOT_MIN, .max = I8XX_DOT_MAX },
@ -140,6 +248,7 @@ static const intel_limit_t intel_limits[] = {
.p1 = { .min = I8XX_P1_LVDS_MIN, .max = I8XX_P1_LVDS_MAX },
.p2 = { .dot_limit = I8XX_P2_SLOW_LIMIT,
.p2_slow = I8XX_P2_LVDS_SLOW, .p2_fast = I8XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
},
{ /* INTEL_LIMIT_I9XX_SDVO_DAC */
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@ -152,6 +261,7 @@ static const intel_limit_t intel_limits[] = {
.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
.find_pll = intel_find_best_PLL,
},
{ /* INTEL_LIMIT_I9XX_LVDS */
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
@ -167,19 +277,157 @@ static const intel_limit_t intel_limits[] = {
*/
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_FAST },
.find_pll = intel_find_best_PLL,
},
/* The parameters and functions below are for the G4X chipset family */
{ /* INTEL_LIMIT_G4X_SDVO */
.dot = { .min = G4X_DOT_SDVO_MIN, .max = G4X_DOT_SDVO_MAX },
.vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
.n = { .min = G4X_N_SDVO_MIN, .max = G4X_N_SDVO_MAX },
.m = { .min = G4X_M_SDVO_MIN, .max = G4X_M_SDVO_MAX },
.m1 = { .min = G4X_M1_SDVO_MIN, .max = G4X_M1_SDVO_MAX },
.m2 = { .min = G4X_M2_SDVO_MIN, .max = G4X_M2_SDVO_MAX },
.p = { .min = G4X_P_SDVO_MIN, .max = G4X_P_SDVO_MAX },
.p1 = { .min = G4X_P1_SDVO_MIN, .max = G4X_P1_SDVO_MAX},
.p2 = { .dot_limit = G4X_P2_SDVO_LIMIT,
.p2_slow = G4X_P2_SDVO_SLOW,
.p2_fast = G4X_P2_SDVO_FAST
},
.find_pll = intel_g4x_find_best_PLL,
},
{ /* INTEL_LIMIT_G4X_HDMI_DAC */
.dot = { .min = G4X_DOT_HDMI_DAC_MIN, .max = G4X_DOT_HDMI_DAC_MAX },
.vco = { .min = G4X_VCO_MIN, .max = G4X_VCO_MAX},
.n = { .min = G4X_N_HDMI_DAC_MIN, .max = G4X_N_HDMI_DAC_MAX },
.m = { .min = G4X_M_HDMI_DAC_MIN, .max = G4X_M_HDMI_DAC_MAX },
.m1 = { .min = G4X_M1_HDMI_DAC_MIN, .max = G4X_M1_HDMI_DAC_MAX },
.m2 = { .min = G4X_M2_HDMI_DAC_MIN, .max = G4X_M2_HDMI_DAC_MAX },
.p = { .min = G4X_P_HDMI_DAC_MIN, .max = G4X_P_HDMI_DAC_MAX },
.p1 = { .min = G4X_P1_HDMI_DAC_MIN, .max = G4X_P1_HDMI_DAC_MAX},
.p2 = { .dot_limit = G4X_P2_HDMI_DAC_LIMIT,
.p2_slow = G4X_P2_HDMI_DAC_SLOW,
.p2_fast = G4X_P2_HDMI_DAC_FAST
},
.find_pll = intel_g4x_find_best_PLL,
},
{ /* INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS */
.dot = { .min = G4X_DOT_SINGLE_CHANNEL_LVDS_MIN,
.max = G4X_DOT_SINGLE_CHANNEL_LVDS_MAX },
.vco = { .min = G4X_VCO_MIN,
.max = G4X_VCO_MAX },
.n = { .min = G4X_N_SINGLE_CHANNEL_LVDS_MIN,
.max = G4X_N_SINGLE_CHANNEL_LVDS_MAX },
.m = { .min = G4X_M_SINGLE_CHANNEL_LVDS_MIN,
.max = G4X_M_SINGLE_CHANNEL_LVDS_MAX },
.m1 = { .min = G4X_M1_SINGLE_CHANNEL_LVDS_MIN,
.max = G4X_M1_SINGLE_CHANNEL_LVDS_MAX },
.m2 = { .min = G4X_M2_SINGLE_CHANNEL_LVDS_MIN,
.max = G4X_M2_SINGLE_CHANNEL_LVDS_MAX },
.p = { .min = G4X_P_SINGLE_CHANNEL_LVDS_MIN,
.max = G4X_P_SINGLE_CHANNEL_LVDS_MAX },
.p1 = { .min = G4X_P1_SINGLE_CHANNEL_LVDS_MIN,
.max = G4X_P1_SINGLE_CHANNEL_LVDS_MAX },
.p2 = { .dot_limit = G4X_P2_SINGLE_CHANNEL_LVDS_LIMIT,
.p2_slow = G4X_P2_SINGLE_CHANNEL_LVDS_SLOW,
.p2_fast = G4X_P2_SINGLE_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
},
{ /* INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS */
.dot = { .min = G4X_DOT_DUAL_CHANNEL_LVDS_MIN,
.max = G4X_DOT_DUAL_CHANNEL_LVDS_MAX },
.vco = { .min = G4X_VCO_MIN,
.max = G4X_VCO_MAX },
.n = { .min = G4X_N_DUAL_CHANNEL_LVDS_MIN,
.max = G4X_N_DUAL_CHANNEL_LVDS_MAX },
.m = { .min = G4X_M_DUAL_CHANNEL_LVDS_MIN,
.max = G4X_M_DUAL_CHANNEL_LVDS_MAX },
.m1 = { .min = G4X_M1_DUAL_CHANNEL_LVDS_MIN,
.max = G4X_M1_DUAL_CHANNEL_LVDS_MAX },
.m2 = { .min = G4X_M2_DUAL_CHANNEL_LVDS_MIN,
.max = G4X_M2_DUAL_CHANNEL_LVDS_MAX },
.p = { .min = G4X_P_DUAL_CHANNEL_LVDS_MIN,
.max = G4X_P_DUAL_CHANNEL_LVDS_MAX },
.p1 = { .min = G4X_P1_DUAL_CHANNEL_LVDS_MIN,
.max = G4X_P1_DUAL_CHANNEL_LVDS_MAX },
.p2 = { .dot_limit = G4X_P2_DUAL_CHANNEL_LVDS_LIMIT,
.p2_slow = G4X_P2_DUAL_CHANNEL_LVDS_SLOW,
.p2_fast = G4X_P2_DUAL_CHANNEL_LVDS_FAST
},
.find_pll = intel_g4x_find_best_PLL,
},
{ /* INTEL_LIMIT_IGD_SDVO */
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX},
.vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
.n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
.m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
.m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
.m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
.p = { .min = I9XX_P_SDVO_DAC_MIN, .max = I9XX_P_SDVO_DAC_MAX },
.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
.p2 = { .dot_limit = I9XX_P2_SDVO_DAC_SLOW_LIMIT,
.p2_slow = I9XX_P2_SDVO_DAC_SLOW, .p2_fast = I9XX_P2_SDVO_DAC_FAST },
},
{ /* INTEL_LIMIT_IGD_LVDS */
.dot = { .min = I9XX_DOT_MIN, .max = I9XX_DOT_MAX },
.vco = { .min = IGD_VCO_MIN, .max = IGD_VCO_MAX },
.n = { .min = IGD_N_MIN, .max = IGD_N_MAX },
.m = { .min = IGD_M_MIN, .max = IGD_M_MAX },
.m1 = { .min = IGD_M1_MIN, .max = IGD_M1_MAX },
.m2 = { .min = IGD_M2_MIN, .max = IGD_M2_MAX },
.p = { .min = IGD_P_LVDS_MIN, .max = IGD_P_LVDS_MAX },
.p1 = { .min = I9XX_P1_MIN, .max = I9XX_P1_MAX },
/* IGD only supports single-channel mode. */
.p2 = { .dot_limit = I9XX_P2_LVDS_SLOW_LIMIT,
.p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
},
};
static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
/* LVDS with dual channel */
limit = &intel_limits
[INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS];
else
			/* LVDS with single channel */
limit = &intel_limits
[INTEL_LIMIT_G4X_SINGLE_CHANNEL_LVDS];
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
limit = &intel_limits[INTEL_LIMIT_G4X_HDMI_DAC];
} else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
limit = &intel_limits[INTEL_LIMIT_G4X_SDVO];
} else /* The option is for other outputs */
limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
return limit;
}
static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
{
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
if (IS_I9XX(dev)) {
if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
} else if (IS_I9XX(dev) && !IS_IGD(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits[INTEL_LIMIT_I9XX_LVDS];
else
limit = &intel_limits[INTEL_LIMIT_I9XX_SDVO_DAC];
} else if (IS_IGD(dev)) {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits[INTEL_LIMIT_IGD_LVDS];
else
limit = &intel_limits[INTEL_LIMIT_IGD_SDVO_DAC];
} else {
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
limit = &intel_limits[INTEL_LIMIT_I8XX_LVDS];
@ -189,8 +437,21 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
return limit;
}
static void intel_clock(int refclk, intel_clock_t *clock)
/* m1 is reserved as 0 in IGD, n is a ring counter */
static void igd_clock(int refclk, intel_clock_t *clock)
{
clock->m = clock->m2 + 2;
clock->p = clock->p1 * clock->p2;
clock->vco = refclk * clock->m / clock->n;
clock->dot = clock->vco / clock->p;
}
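
The two divisor equations side by side, as a minimal standalone sketch; the sample m/n/p values below are illustrative, not taken from any real mode:

#include <stdio.h>

int main(void)
{
	int refclk = 96000;	/* kHz */
	int m1 = 10, m2 = 5, n = 3, p1 = 2, p2 = 10;

	/* classic i9xx path: m = 5*(m1+2) + (m2+2), divide by n+2 */
	int m = 5 * (m1 + 2) + (m2 + 2);
	int vco = refclk * m / (n + 2);
	printf("i9xx: m=%d vco=%d dot=%d kHz\n", m, vco, vco / (p1 * p2));

	/* IGD path: m1 unused, m = m2+2, n divides directly */
	int igd_m = m2 + 2;
	int igd_vco = refclk * igd_m / n;
	printf("IGD:  m=%d vco=%d dot=%d kHz\n", igd_m, igd_vco,
	       igd_vco / (p1 * p2));
	return 0;
}
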
static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
{
if (IS_IGD(dev)) {
igd_clock(refclk, clock);
return;
}
clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
clock->p = clock->p1 * clock->p2;
clock->vco = refclk * clock->m / (clock->n + 2);
@ -226,6 +487,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
{
const intel_limit_t *limit = intel_limit (crtc);
struct drm_device *dev = crtc->dev;
if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
INTELPllInvalid ("p1 out of range\n");
@ -235,7 +497,7 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
INTELPllInvalid ("m2 out of range\n");
if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
INTELPllInvalid ("m1 out of range\n");
if (clock->m1 <= clock->m2)
if (clock->m1 <= clock->m2 && !IS_IGD(dev))
INTELPllInvalid ("m1 <= m2\n");
if (clock->m < limit->m.min || limit->m.max < clock->m)
INTELPllInvalid ("m out of range\n");
@ -252,18 +514,14 @@ static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
return true;
}
/**
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
int refclk, intel_clock_t *best_clock)
static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
const intel_limit_t *limit = intel_limit(crtc);
int err = target;
if (IS_I9XX(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
@ -289,15 +547,17 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
memset (best_clock, 0, sizeof (*best_clock));
for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
for (clock.m2 = limit->m2.min; clock.m2 < clock.m1 &&
clock.m2 <= limit->m2.max; clock.m2++) {
for (clock.m2 = limit->m2.min; clock.m2 <= limit->m2.max; clock.m2++) {
/* m1 is always 0 in IGD */
if (clock.m2 >= clock.m1 && !IS_IGD(dev))
break;
for (clock.n = limit->n.min; clock.n <= limit->n.max;
clock.n++) {
for (clock.p1 = limit->p1.min;
clock.p1 <= limit->p1.max; clock.p1++) {
int this_err;
intel_clock(refclk, &clock);
intel_clock(dev, refclk, &clock);
if (!intel_PLL_is_valid(crtc, &clock))
continue;
@ -315,6 +575,63 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
return (err != target);
}
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int target, int refclk, intel_clock_t *best_clock)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
intel_clock_t clock;
int max_n;
bool found;
/* approximately equals target * 0.00488 */
int err_most = (target >> 8) + (target >> 10);
found = false;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP)
clock.p2 = limit->p2.p2_fast;
else
clock.p2 = limit->p2.p2_slow;
} else {
if (target < limit->p2.dot_limit)
clock.p2 = limit->p2.p2_slow;
else
clock.p2 = limit->p2.p2_fast;
}
memset(best_clock, 0, sizeof(*best_clock));
max_n = limit->n.max;
/* based on hardware requirement, prefer smaller n for precision */
for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
/* based on hardware requirement, prefer larger m1, m2, p1 */
for (clock.m1 = limit->m1.max;
clock.m1 >= limit->m1.min; clock.m1--) {
for (clock.m2 = limit->m2.max;
clock.m2 >= limit->m2.min; clock.m2--) {
for (clock.p1 = limit->p1.max;
clock.p1 >= limit->p1.min; clock.p1--) {
int this_err;
intel_clock(dev, refclk, &clock);
if (!intel_PLL_is_valid(crtc, &clock))
continue;
this_err = abs(clock.dot - target);
if (this_err < err_most) {
*best_clock = clock;
err_most = this_err;
max_n = clock.n;
found = true;
}
}
}
}
}
return found;
}
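
A quick standalone check of the error budget used above (target value arbitrary): the two right shifts approximate a 0.488% tolerance (1/256 + 1/1024) without a division:

#include <stdio.h>

int main(void)
{
	int target = 100000;	/* kHz, arbitrary example */
	int err_most = (target >> 8) + (target >> 10);

	/* prints 487, i.e. ~0.487% of target */
	printf("err_most = %d\n", err_most);
	return 0;
}
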
void
intel_wait_for_vblank(struct drm_device *dev)
{
@ -634,7 +951,7 @@ static int intel_get_core_clock_speed(struct drm_device *dev)
return 400000;
else if (IS_I915G(dev))
return 333000;
else if (IS_I945GM(dev) || IS_845G(dev))
else if (IS_I945GM(dev) || IS_845G(dev) || IS_IGDGM(dev))
return 200000;
else if (IS_I915GM(dev)) {
u16 gcfgc = 0;
@ -733,6 +1050,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
bool is_crt = false, is_lvds = false, is_tv = false;
struct drm_mode_config *mode_config = &dev->mode_config;
struct drm_connector *connector;
const intel_limit_t *limit;
int ret;
drm_vblank_pre_modeset(dev, pipe);
@ -776,13 +1094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
refclk = 48000;
}
ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
/*
* Returns a set of divisors for the desired target clock with the given
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
limit = intel_limit(crtc);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
if (IS_IGD(dev))
fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
else
fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
dpll = DPLL_VGA_MODE_DIS;
if (IS_I9XX(dev)) {
@ -799,7 +1126,10 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
/* compute bitmask from p1 value */
dpll |= (1 << (clock.p1 - 1)) << 16;
if (IS_IGD(dev))
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
else
dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
switch (clock.p2) {
case 5:
dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
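
The p1 divider is stored one-hot in the DPLL register, which is why the encode side above shifts a single bit into place and the decode side (further down, in intel_crtc_clock_get) recovers p1 with ffs(). A standalone sketch, with a placeholder shift value rather than the real register layout:

#include <stdio.h>
#include <strings.h>	/* ffs() */

#define P1_SHIFT 16	/* placeholder for DPLL_FPA01_P1_POST_DIV_SHIFT */

int main(void)
{
	int p1 = 3;
	unsigned int dpll = (1u << (p1 - 1)) << P1_SHIFT;	/* encode */
	int decoded = ffs((dpll >> P1_SHIFT) & 0xff);		/* decode */

	printf("p1=%d -> dpll=0x%08x -> p1=%d\n", p1, dpll, decoded);
	return 0;
}
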
@ -1279,10 +1609,20 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
fp = I915_READ((pipe == 0) ? FPA1 : FPB1);
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
if (IS_IGD(dev)) {
clock.n = ffs((fp & FP_N_IGD_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
clock.m2 = (fp & FP_M2_IGD_DIV_MASK) >> FP_M2_DIV_SHIFT;
} else {
clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
}
if (IS_I9XX(dev)) {
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
if (IS_IGD(dev))
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_IGD) >>
DPLL_FPA01_P1_POST_DIV_SHIFT_IGD);
else
clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
DPLL_FPA01_P1_POST_DIV_SHIFT);
switch (dpll & DPLL_MODE_MASK) {
@ -1301,7 +1641,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
}
/* XXX: Handle the 100MHz refclk */
intel_clock(96000, &clock);
intel_clock(dev, 96000, &clock);
} else {
bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
@ -1313,9 +1653,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
if ((dpll & PLL_REF_INPUT_MASK) ==
PLLB_REF_INPUT_SPREADSPECTRUMIN) {
/* XXX: might not be 66MHz */
intel_clock(66000, &clock);
intel_clock(dev, 66000, &clock);
} else
intel_clock(48000, &clock);
intel_clock(dev, 48000, &clock);
} else {
if (dpll & PLL_P1_DIVIDE_BY_TWO)
clock.p1 = 2;
@ -1328,7 +1668,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
else
clock.p2 = 2;
intel_clock(48000, &clock);
intel_clock(dev, 48000, &clock);
}
}
@ -1474,13 +1814,21 @@ static void intel_setup_outputs(struct drm_device *dev)
if (IS_I9XX(dev)) {
int found;
u32 reg;
if (I915_READ(SDVOB) & SDVO_DETECTED) {
found = intel_sdvo_init(dev, SDVOB);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOB);
}
if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) {
/* Before G4X, SDVOC doesn't have its own detect register */
if (IS_G4X(dev))
reg = SDVOC;
else
reg = SDVOB;
if (I915_READ(reg) & SDVO_DETECTED) {
found = intel_sdvo_init(dev, SDVOC);
if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
intel_hdmi_init(dev, SDVOC);


@ -265,7 +265,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
pfit_control = 0;
if (!IS_I965G(dev)) {
if (dev_priv->panel_wants_dither)
if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
pfit_control |= PANEL_8TO6_DITHER_ENABLE;
}
else


@ -217,8 +217,8 @@ static const u32 filter_table[] = {
*/
static const struct color_conversion ntsc_m_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
};
static const struct video_levels ntsc_m_levels_composite = {
@ -226,9 +226,9 @@ static const struct video_levels ntsc_m_levels_composite = {
};
static const struct color_conversion ntsc_m_csc_svideo = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
};
static const struct video_levels ntsc_m_levels_svideo = {
@ -237,8 +237,8 @@ static const struct video_levels ntsc_m_levels_svideo = {
static const struct color_conversion ntsc_j_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0119,
.ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0f00,
.rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0f00,
.ru = 0x074c, .gu = 0x0546, .bu = 0x05ec, .au = 0x0200,
.rv = 0x035a, .gv = 0x0322, .bv = 0x06e1, .av = 0x0200,
};
static const struct video_levels ntsc_j_levels_composite = {
@ -247,8 +247,8 @@ static const struct video_levels ntsc_j_levels_composite = {
static const struct color_conversion ntsc_j_csc_svideo = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x014c,
.ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0f00,
.rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0f00,
.ru = 0x0788, .gu = 0x0581, .bu = 0x0322, .au = 0x0200,
.rv = 0x0399, .gv = 0x0356, .bv = 0x070a, .av = 0x0200,
};
static const struct video_levels ntsc_j_levels_svideo = {
@ -257,8 +257,8 @@ static const struct video_levels ntsc_j_levels_svideo = {
static const struct color_conversion pal_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0113,
.ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0f00,
.rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0f00,
.ru = 0x0745, .gu = 0x053f, .bu = 0x05e1, .au = 0x0200,
.rv = 0x0353, .gv = 0x031c, .bv = 0x06dc, .av = 0x0200,
};
static const struct video_levels pal_levels_composite = {
@ -267,8 +267,8 @@ static const struct video_levels pal_levels_composite = {
static const struct color_conversion pal_csc_svideo = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
.ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0f00,
.rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0f00,
.ru = 0x0780, .gu = 0x0579, .bu = 0x031c, .au = 0x0200,
.rv = 0x0390, .gv = 0x034f, .bv = 0x0705, .av = 0x0200,
};
static const struct video_levels pal_levels_svideo = {
@ -277,8 +277,8 @@ static const struct video_levels pal_levels_svideo = {
static const struct color_conversion pal_m_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
};
static const struct video_levels pal_m_levels_composite = {
@ -286,9 +286,9 @@ static const struct video_levels pal_m_levels_composite = {
};
static const struct color_conversion pal_m_csc_svideo = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
};
static const struct video_levels pal_m_levels_svideo = {
@ -297,8 +297,8 @@ static const struct video_levels pal_m_levels_svideo = {
static const struct color_conversion pal_n_csc_composite = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0104,
.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0f00,
.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0f00,
.ru = 0x0733, .gu = 0x052d, .bu = 0x05c7, .au = 0x0200,
.rv = 0x0340, .gv = 0x030c, .bv = 0x06d0, .av = 0x0200,
};
static const struct video_levels pal_n_levels_composite = {
@ -306,9 +306,9 @@ static const struct video_levels pal_n_levels_composite = {
};
static const struct color_conversion pal_n_csc_svideo = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0134,
.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0f00,
.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0f00,
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0133,
.ru = 0x076a, .gu = 0x0564, .bu = 0x030d, .au = 0x0200,
.rv = 0x037a, .gv = 0x033d, .bv = 0x06f6, .av = 0x0200,
};
static const struct video_levels pal_n_levels_svideo = {
@ -319,9 +319,9 @@ static const struct video_levels pal_n_levels_svideo = {
* Component connections
*/
static const struct color_conversion sdtv_csc_yprpb = {
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0146,
.ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0f00,
.rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0f00,
.ry = 0x0332, .gy = 0x012d, .by = 0x07d3, .ay = 0x0145,
.ru = 0x0559, .gu = 0x0353, .bu = 0x0100, .au = 0x0200,
.rv = 0x0100, .gv = 0x03ad, .bv = 0x074d, .av = 0x0200,
};
static const struct color_conversion sdtv_csc_rgb = {
@ -331,9 +331,9 @@ static const struct color_conversion sdtv_csc_rgb = {
};
static const struct color_conversion hdtv_csc_yprpb = {
.ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0146,
.ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0f00,
.rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0f00,
.ry = 0x05b3, .gy = 0x016e, .by = 0x0728, .ay = 0x0145,
.ru = 0x07d5, .gu = 0x038b, .bu = 0x0100, .au = 0x0200,
.rv = 0x0100, .gv = 0x03d1, .bv = 0x06bc, .av = 0x0200,
};
static const struct color_conversion hdtv_csc_rgb = {
@ -414,7 +414,7 @@ struct tv_mode {
static const struct tv_mode tv_modes[] = {
{
.name = "NTSC-M",
.clock = 107520,
.clock = 108000,
.refresh = 29970,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@ -442,8 +442,8 @@ static const struct tv_mode tv_modes[] = {
.vburst_start_f4 = 10, .vburst_end_f4 = 240,
/* desired 3.5800000 actual 3.5800000 clock 108.00 */
.dda1_inc = 136,
.dda2_inc = 7624, .dda2_size = 20013,
.dda1_inc = 135,
.dda2_inc = 20800, .dda2_size = 27456,
.dda3_inc = 0, .dda3_size = 0,
.sc_reset = TV_SC_RESET_EVERY_4,
.pal_burst = false,
@ -457,7 +457,7 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "NTSC-443",
.clock = 107520,
.clock = 108000,
.refresh = 29970,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@ -485,10 +485,10 @@ static const struct tv_mode tv_modes[] = {
/* desired 4.4336180 actual 4.4336180 clock 108.00 */
.dda1_inc = 168,
.dda2_inc = 18557, .dda2_size = 20625,
.dda3_inc = 0, .dda3_size = 0,
.sc_reset = TV_SC_RESET_EVERY_8,
.pal_burst = true,
.dda2_inc = 4093, .dda2_size = 27456,
.dda3_inc = 310, .dda3_size = 525,
.sc_reset = TV_SC_RESET_NEVER,
.pal_burst = false,
.composite_levels = &ntsc_m_levels_composite,
.composite_color = &ntsc_m_csc_composite,
@ -499,7 +499,7 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "NTSC-J",
.clock = 107520,
.clock = 108000,
.refresh = 29970,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@ -527,8 +527,8 @@ static const struct tv_mode tv_modes[] = {
.vburst_start_f4 = 10, .vburst_end_f4 = 240,
/* desired 3.5800000 actual 3.5800000 clock 108.00 */
.dda1_inc = 136,
.dda2_inc = 7624, .dda2_size = 20013,
.dda1_inc = 135,
.dda2_inc = 20800, .dda2_size = 27456,
.dda3_inc = 0, .dda3_size = 0,
.sc_reset = TV_SC_RESET_EVERY_4,
.pal_burst = false,
@ -542,7 +542,7 @@ static const struct tv_mode tv_modes[] = {
},
{
.name = "PAL-M",
.clock = 107520,
.clock = 108000,
.refresh = 29970,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@ -570,11 +570,11 @@ static const struct tv_mode tv_modes[] = {
.vburst_start_f4 = 10, .vburst_end_f4 = 240,
/* desired 3.5800000 actual 3.5800000 clock 108.00 */
.dda1_inc = 136,
.dda2_inc = 7624, .dda2_size = 20013,
.dda1_inc = 135,
.dda2_inc = 16704, .dda2_size = 27456,
.dda3_inc = 0, .dda3_size = 0,
.sc_reset = TV_SC_RESET_EVERY_4,
.pal_burst = false,
.sc_reset = TV_SC_RESET_EVERY_8,
.pal_burst = true,
.composite_levels = &pal_m_levels_composite,
.composite_color = &pal_m_csc_composite,
@ -586,7 +586,7 @@ static const struct tv_mode tv_modes[] = {
{
/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
.name = "PAL-N",
.clock = 107520,
.clock = 108000,
.refresh = 25000,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
@ -615,9 +615,9 @@ static const struct tv_mode tv_modes[] = {
/* desired 3.5820560 actual 3.5820560 clock 108.00 */
.dda1_inc = 168,
.dda2_inc = 18557, .dda2_size = 20625,
.dda3_inc = 0, .dda3_size = 0,
.dda1_inc = 135,
.dda2_inc = 23578, .dda2_size = 27648,
.dda3_inc = 134, .dda3_size = 625,
.sc_reset = TV_SC_RESET_EVERY_8,
.pal_burst = true,
@ -631,12 +631,12 @@ static const struct tv_mode tv_modes[] = {
{
/* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
.name = "PAL",
.clock = 107520,
.clock = 108000,
.refresh = 25000,
.oversample = TV_OVERSAMPLE_8X,
.component_only = 0,
.hsync_end = 64, .hblank_end = 128,
.hsync_end = 64, .hblank_end = 142,
.hblank_start = 844, .htotal = 863,
.progressive = false, .trilevel_sync = false,
@ -659,8 +659,8 @@ static const struct tv_mode tv_modes[] = {
/* desired 4.4336180 actual 4.4336180 clock 108.00 */
.dda1_inc = 168,
.dda2_inc = 18557, .dda2_size = 20625,
.dda3_inc = 0, .dda3_size = 0,
.dda2_inc = 4122, .dda2_size = 27648,
.dda3_inc = 67, .dda3_size = 625,
.sc_reset = TV_SC_RESET_EVERY_8,
.pal_burst = true,
@ -689,7 +689,7 @@ static const struct tv_mode tv_modes[] = {
.veq_ena = false,
.vi_end_f1 = 44, .vi_end_f2 = 44,
.nbr_end = 496,
.nbr_end = 479,
.burst_ena = false,
@ -713,7 +713,7 @@ static const struct tv_mode tv_modes[] = {
.veq_ena = false,
.vi_end_f1 = 44, .vi_end_f2 = 44,
.nbr_end = 496,
.nbr_end = 479,
.burst_ena = false,
@ -876,7 +876,7 @@ static const struct tv_mode tv_modes[] = {
.component_only = 1,
.hsync_end = 88, .hblank_end = 235,
.hblank_start = 2155, .htotal = 2200,
.hblank_start = 2155, .htotal = 2201,
.progressive = false, .trilevel_sync = true,
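
Sanity check on the retuned NTSC-M DDA values above: assuming the subcarrier DDA accumulates clock * (dda1_inc + dda2_inc/dda2_size) / 4096, with dda3 as a finer correction term (the exact accumulator layout is an assumption here), the new numbers land on the 3.579545 MHz NTSC burst at the new 108 MHz pixel clock:

#include <stdio.h>

int main(void)
{
	double clock = 108000.0;	/* kHz */
	double dda1 = 135.0, dda2 = 20800.0, dda2_size = 27456.0;
	double fsc = clock * (dda1 + dda2 / dda2_size) / 4096.0;

	/* prints ~3579.5455 kHz, i.e. the 3.579545 MHz NTSC subcarrier */
	printf("subcarrier ~= %.4f kHz\n", fsc);
	return 0;
}
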
@ -1082,7 +1082,7 @@ intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mo
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
/* Ensure TV refresh is close to desired refresh */
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 1)
if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode)) < 10)
return MODE_OK;
return MODE_CLOCK_RANGE;
}
@ -1135,7 +1135,8 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
if (!tv_mode)
return; /* can't happen (mode_prepare prevents this) */
tv_ctl = 0;
tv_ctl = I915_READ(TV_CTL);
tv_ctl &= TV_CTL_SAVE;
switch (tv_priv->type) {
default:
@ -1215,7 +1216,6 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
/* dda1 implies valid video levels */
if (tv_mode->dda1_inc) {
scctl1 |= TV_SC_DDA1_EN;
scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
}
if (tv_mode->dda2_inc)
@ -1225,6 +1225,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
scctl1 |= TV_SC_DDA3_EN;
scctl1 |= tv_mode->sc_reset;
scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@ -1266,7 +1267,11 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
color_conversion->av);
}
I915_WRITE(TV_CLR_KNOBS, 0x00606000);
if (IS_I965G(dev))
I915_WRITE(TV_CLR_KNOBS, 0x00404000);
else
I915_WRITE(TV_CLR_KNOBS, 0x00606000);
if (video_levels)
I915_WRITE(TV_CLR_LEVEL,
((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
@ -1401,6 +1406,7 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
tv_dac = I915_READ(TV_DAC);
I915_WRITE(TV_DAC, save_tv_dac);
I915_WRITE(TV_CTL, save_tv_ctl);
intel_wait_for_vblank(dev);
}
/*
* A B C
@ -1451,7 +1457,7 @@ intel_tv_detect(struct drm_connector *connector)
mode = reported_modes[0];
drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
if (encoder->crtc) {
if (encoder->crtc && encoder->crtc->enabled) {
type = intel_tv_detect_type(encoder->crtc, intel_output);
} else {
crtc = intel_get_load_detect_pipe(intel_output, &mode, &dpms_mode);
@ -1462,6 +1468,8 @@ intel_tv_detect(struct drm_connector *connector)
type = -1;
}
tv_priv->type = type;
if (type < 0)
return connector_status_disconnected;
@ -1495,7 +1503,8 @@ intel_tv_get_modes(struct drm_connector *connector)
struct drm_display_mode *mode_ptr;
struct intel_output *intel_output = to_intel_output(connector);
const struct tv_mode *tv_mode = intel_tv_mode_find(intel_output);
int j;
int j, count = 0;
u64 tmp;
for (j = 0; j < sizeof(input_res_table) / sizeof(input_res_table[0]);
j++) {
@ -1510,8 +1519,9 @@ intel_tv_get_modes(struct drm_connector *connector)
&& !tv_mode->component_only))
continue;
mode_ptr = drm_calloc(1, sizeof(struct drm_display_mode),
DRM_MEM_DRIVER);
mode_ptr = drm_mode_create(connector->dev);
if (!mode_ptr)
continue;
strncpy(mode_ptr->name, input->name, DRM_DISPLAY_MODE_LEN);
mode_ptr->hdisplay = hactive_s;
@ -1528,15 +1538,17 @@ intel_tv_get_modes(struct drm_connector *connector)
mode_ptr->vsync_end = mode_ptr->vsync_start + 1;
mode_ptr->vtotal = vactive_s + 33;
mode_ptr->clock = (int) (tv_mode->refresh *
mode_ptr->vtotal *
mode_ptr->htotal / 1000) / 1000;
tmp = (u64) tv_mode->refresh * mode_ptr->vtotal;
tmp *= mode_ptr->htotal;
tmp = div_u64(tmp, 1000000);
mode_ptr->clock = (int) tmp;
mode_ptr->type = DRM_MODE_TYPE_DRIVER;
drm_mode_probed_add(connector, mode_ptr);
count++;
}
return 0;
return count;
}
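
The div_u64() change above exists because the old expression multiplied three 32-bit values before dividing; for ordinary TV timings the product exceeds 2^32 and silently wraps. A userspace sketch with illustrative (not real-mode) numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t refresh = 29970, vtotal = 513, htotal = 858;

	uint32_t wrapped = refresh * vtotal * htotal / 1000000;

	uint64_t tmp = (uint64_t)refresh * vtotal;
	tmp *= htotal;
	tmp /= 1000000;			/* div_u64() in the kernel */

	/* prints "32-bit: 306 kHz  64-bit: 13191 kHz" */
	printf("32-bit: %u kHz  64-bit: %llu kHz\n",
	       wrapped, (unsigned long long)tmp);
	return 0;
}
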
static void


@ -68,22 +68,22 @@ static struct hpsb_highlevel csr_highlevel = {
.host_reset = host_reset,
};
const static struct hpsb_address_ops map_ops = {
static const struct hpsb_address_ops map_ops = {
.read = read_maps,
};
const static struct hpsb_address_ops fcp_ops = {
static const struct hpsb_address_ops fcp_ops = {
.write = write_fcp,
};
const static struct hpsb_address_ops reg_ops = {
static const struct hpsb_address_ops reg_ops = {
.read = read_regs,
.write = write_regs,
.lock = lock_regs,
.lock64 = lock64_regs,
};
const static struct hpsb_address_ops config_rom_ops = {
static const struct hpsb_address_ops config_rom_ops = {
.read = read_config_rom,
};


@ -2171,7 +2171,7 @@ static const struct file_operations dv1394_fops=
* Export information about protocols/devices supported by this driver.
*/
#ifdef MODULE
static struct ieee1394_device_id dv1394_id_table[] = {
static const struct ieee1394_device_id dv1394_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
.specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,


@ -181,7 +181,7 @@ static void ether1394_remove_host(struct hpsb_host *host);
static void ether1394_host_reset(struct hpsb_host *host);
/* Function for incoming 1394 packets */
const static struct hpsb_address_ops addr_ops = {
static const struct hpsb_address_ops addr_ops = {
.write = ether1394_write,
};
@ -438,7 +438,7 @@ static int eth1394_update(struct unit_directory *ud)
return eth1394_new_node(hi, ud);
}
static struct ieee1394_device_id eth1394_id_table[] = {
static const struct ieee1394_device_id eth1394_id_table[] = {
{
.match_flags = (IEEE1394_MATCH_SPECIFIER_ID |
IEEE1394_MATCH_VERSION),


@ -478,7 +478,7 @@ int hpsb_unregister_addrspace(struct hpsb_highlevel *hl, struct hpsb_host *host,
return retval;
}
const static struct hpsb_address_ops dummy_ops;
static const struct hpsb_address_ops dummy_ops;
/* dummy address spaces as lower and upper bounds of the host's a.s. list */
static void init_hpsb_highlevel(struct hpsb_host *host)


@ -484,7 +484,7 @@ static struct device_attribute *const fw_host_attrs[] = {
static ssize_t fw_show_drv_device_ids(struct device_driver *drv, char *buf)
{
struct hpsb_protocol_driver *driver;
struct ieee1394_device_id *id;
const struct ieee1394_device_id *id;
int length = 0;
char *scratch = buf;
@ -658,7 +658,7 @@ static int nodemgr_bus_match(struct device * dev, struct device_driver * drv)
{
struct hpsb_protocol_driver *driver;
struct unit_directory *ud;
struct ieee1394_device_id *id;
const struct ieee1394_device_id *id;
/* We only match unit directories */
if (dev->platform_data != &nodemgr_ud_platform_data)


@ -125,7 +125,7 @@ struct hpsb_protocol_driver {
* probe function below can implement further protocol
* dependent or vendor dependent checking.
*/
struct ieee1394_device_id *id_table;
const struct ieee1394_device_id *id_table;
/*
* The update function is called when the node has just


@ -90,7 +90,7 @@ static int arm_lock(struct hpsb_host *host, int nodeid, quadlet_t * store,
static int arm_lock64(struct hpsb_host *host, int nodeid, octlet_t * store,
u64 addr, octlet_t data, octlet_t arg, int ext_tcode,
u16 flags);
const static struct hpsb_address_ops arm_ops = {
static const struct hpsb_address_ops arm_ops = {
.read = arm_read,
.write = arm_write,
.lock = arm_lock,
@ -369,6 +369,7 @@ static const char __user *raw1394_compat_write(const char __user *buf)
{
struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
struct raw1394_request __user *r;
r = compat_alloc_user_space(sizeof(struct raw1394_request));
#define C(x) __copy_in_user(&r->x, &cr->x, sizeof(r->x))
@ -378,7 +379,8 @@ static const char __user *raw1394_compat_write(const char __user *buf)
C(tag) ||
C(sendb) ||
C(recvb))
return ERR_PTR(-EFAULT);
return (__force const char __user *)ERR_PTR(-EFAULT);
return (const char __user *)r;
}
#undef C
@ -389,6 +391,7 @@ static int
raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
{
struct compat_raw1394_req __user *cr = (typeof(cr)) buf;
if (!access_ok(VERIFY_WRITE, cr, sizeof(struct compat_raw1394_req)) ||
P(type) ||
P(error) ||
@ -400,6 +403,7 @@ raw1394_compat_read(const char __user *buf, struct raw1394_request *r)
P(sendb) ||
P(recvb))
return -EFAULT;
return sizeof(struct compat_raw1394_req);
}
#undef P
@ -2249,8 +2253,8 @@ static ssize_t raw1394_write(struct file *file, const char __user * buffer,
sizeof(struct compat_raw1394_req) !=
sizeof(struct raw1394_request)) {
buffer = raw1394_compat_write(buffer);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
if (IS_ERR((__force void *)buffer))
return PTR_ERR((__force void *)buffer);
} else
#endif
if (count != sizeof(struct raw1394_request)) {
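
The __force annotations above are for sparse: ERR_PTR()/PTR_ERR() encode an errno inside a pointer value, and pushing a __user pointer through them needs an explicit address-space cast. The idiom itself, in plain userspace form (simplified; no sparse address spaces here):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *lookup(int ok)
{
	static int obj;
	return ok ? (void *)&obj : ERR_PTR(-EFAULT);
}

int main(void)
{
	void *p = lookup(0);

	if (IS_ERR(p))
		printf("failed: %ld\n", PTR_ERR(p));	/* -14, EFAULT */
	return 0;
}
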
@ -2978,7 +2982,7 @@ static int raw1394_release(struct inode *inode, struct file *file)
* Export information about protocols/devices supported by this driver.
*/
#ifdef MODULE
static struct ieee1394_device_id raw1394_id_table[] = {
static const struct ieee1394_device_id raw1394_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
.specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff,


@ -265,7 +265,7 @@ static struct hpsb_highlevel sbp2_highlevel = {
.host_reset = sbp2_host_reset,
};
const static struct hpsb_address_ops sbp2_ops = {
static const struct hpsb_address_ops sbp2_ops = {
.write = sbp2_handle_status_write
};
@ -275,7 +275,7 @@ static int sbp2_handle_physdma_write(struct hpsb_host *, int, int, quadlet_t *,
static int sbp2_handle_physdma_read(struct hpsb_host *, int, quadlet_t *, u64,
size_t, u16);
const static struct hpsb_address_ops sbp2_physdma_ops = {
static const struct hpsb_address_ops sbp2_physdma_ops = {
.read = sbp2_handle_physdma_read,
.write = sbp2_handle_physdma_write,
};
@ -285,7 +285,7 @@ const static struct hpsb_address_ops sbp2_physdma_ops = {
/*
* Interface to driver core and IEEE 1394 core
*/
static struct ieee1394_device_id sbp2_id_table[] = {
static const struct ieee1394_device_id sbp2_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
.specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff,
@ -1413,8 +1413,7 @@ static void sbp2_parse_unit_directory(struct sbp2_lu *lu,
"(firmware_revision 0x%06x, vendor_id 0x%06x,"
" model_id 0x%06x)",
NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid),
workarounds, firmware_revision,
ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id,
workarounds, firmware_revision, ud->vendor_id,
model);
/* We would need one SCSI host template for each target to adjust


@ -1294,7 +1294,7 @@ static const struct file_operations video1394_fops=
* Export information about protocols/devices supported by this driver.
*/
#ifdef MODULE
static struct ieee1394_device_id video1394_id_table[] = {
static const struct ieee1394_device_id video1394_id_table[] = {
{
.match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION,
.specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff,


@ -1602,8 +1602,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,
netif_napi_add(netdev, &nesvnic->napi, nes_netdev_poll, 128);
nes_debug(NES_DBG_INIT, "Enabling VLAN Insert/Delete.\n");
netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
netdev->vlan_rx_register = nes_netdev_vlan_rx_register;
netdev->features |= NETIF_F_LLTX;
/* Fill in the port structure */
nesvnic->netdev = netdev;


@ -115,7 +115,7 @@ static const char *debug_fcp_ctype(unsigned int ctype)
}
static const char *debug_fcp_opcode(unsigned int opcode,
const u8 *data, size_t length)
const u8 *data, int length)
{
switch (opcode) {
case AVC_OPCODE_VENDOR: break;
@ -135,13 +135,14 @@ static const char *debug_fcp_opcode(unsigned int opcode,
case SFE_VENDOR_OPCODE_REGISTER_REMOTE_CONTROL: return "RegisterRC";
case SFE_VENDOR_OPCODE_LNB_CONTROL: return "LNBControl";
case SFE_VENDOR_OPCODE_TUNE_QPSK: return "TuneQPSK";
case SFE_VENDOR_OPCODE_TUNE_QPSK2: return "TuneQPSK2";
case SFE_VENDOR_OPCODE_HOST2CA: return "Host2CA";
case SFE_VENDOR_OPCODE_CA2HOST: return "CA2Host";
}
return "Vendor";
}
static void debug_fcp(const u8 *data, size_t length)
static void debug_fcp(const u8 *data, int length)
{
unsigned int subunit_type, subunit_id, op;
const char *prefix = data[0] > 7 ? "FCP <- " : "FCP -> ";
@ -266,7 +267,10 @@ static void avc_tuner_tuneqpsk(struct firedtv *fdtv,
c->operand[0] = SFE_VENDOR_DE_COMPANYID_0;
c->operand[1] = SFE_VENDOR_DE_COMPANYID_1;
c->operand[2] = SFE_VENDOR_DE_COMPANYID_2;
c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK;
if (fdtv->type == FIREDTV_DVB_S2)
c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK2;
else
c->operand[3] = SFE_VENDOR_OPCODE_TUNE_QPSK;
c->operand[4] = (params->frequency >> 24) & 0xff;
c->operand[5] = (params->frequency >> 16) & 0xff;


@ -81,13 +81,16 @@ static int get_sb_mtd_aux(struct file_system_type *fs_type, int flags,
/* go */
sb->s_flags |= MS_ACTIVE;
return simple_set_mnt(mnt, sb);
simple_set_mnt(mnt, sb);
return 0;
/* new mountpoint for an already mounted superblock */
already_mounted:
DEBUG(1, "MTDSB: Device %d (\"%s\") is already mounted\n",
mtd->index, mtd->name);
ret = simple_set_mnt(mnt, sb);
simple_set_mnt(mnt, sb);
ret = 0;
goto out_put;
out_error:


@ -353,9 +353,6 @@ el2_probe1(struct net_device *dev, int ioaddr)
dev->netdev_ops = &el2_netdev_ops;
dev->ethtool_ops = &netdev_ethtool_ops;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = eip_poll;
#endif
retval = register_netdev(dev);
if (retval)


@ -972,6 +972,14 @@ config ENC28J60_WRITEVERIFY
Enable a verify step after each buffer write; useful for debugging.
If unsure, say N.
config ETHOC
tristate "OpenCores 10/100 Mbps Ethernet MAC support"
depends on NET_ETHERNET
select MII
select PHYLIB
help
Say Y here if you want to use the OpenCores 10/100 Mbps Ethernet MAC.
config SMC911X
tristate "SMSC LAN911[5678] support"
select CRC32


@ -230,6 +230,7 @@ obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o
pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o
obj-$(CONFIG_MLX4_CORE) += mlx4/
obj-$(CONFIG_ENC28J60) += enc28j60.o
obj-$(CONFIG_ETHOC) += ethoc.o
obj-$(CONFIG_XTENSA_XT2000_SONIC) += xtsonic.o


@ -143,6 +143,22 @@ out:
}
#endif
static const struct net_device_ops ac_netdev_ops = {
.ndo_open = ac_open,
.ndo_stop = ac_close_card,
.ndo_start_xmit = ei_start_xmit,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,
#endif
};
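
This is the shape of the conversion repeated across the drivers below: scattered per-device function pointers collapse into one shared, read-only ops table. A minimal model with stand-in types (not the real netdev API):

#include <stdio.h>

struct dev;

struct dev_ops {
	int (*open)(struct dev *);
	int (*stop)(struct dev *);
};

struct dev {
	const struct dev_ops *ops;
};

static int my_open(struct dev *d) { (void)d; puts("open"); return 0; }
static int my_stop(struct dev *d) { (void)d; puts("stop"); return 0; }

static const struct dev_ops my_ops = {	/* one const table, shared */
	.open = my_open,
	.stop = my_stop,
};

int main(void)
{
	struct dev d = { .ops = &my_ops };	/* was: d.open = my_open; ... */

	d.ops->open(&d);
	d.ops->stop(&d);
	return 0;
}
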
static int __init ac_probe1(int ioaddr, struct net_device *dev)
{
int i, retval;
@ -253,11 +269,7 @@ static int __init ac_probe1(int ioaddr, struct net_device *dev)
ei_status.block_output = &ac_block_output;
ei_status.get_8390_hdr = &ac_get_8390_hdr;
dev->open = &ac_open;
dev->stop = &ac_close_card;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = ei_poll;
#endif
dev->netdev_ops = &ac_netdev_ops;
NS8390_init(dev, 0);
retval = register_netdev(dev);


@ -171,7 +171,6 @@ static unsigned int cops_debug = COPS_DEBUG;
struct cops_local
{
struct net_device_stats stats;
int board; /* Holds what board type is. */
int nodeid; /* Set to 1 once have nodeid. */
unsigned char node_acquire; /* Node ID when acquired. */
@ -197,7 +196,6 @@ static int cops_send_packet (struct sk_buff *skb, struct net_device *dev);
static void set_multicast_list (struct net_device *dev);
static int cops_ioctl (struct net_device *dev, struct ifreq *rq, int cmd);
static int cops_close (struct net_device *dev);
static struct net_device_stats *cops_get_stats (struct net_device *dev);
static void cleanup_card(struct net_device *dev)
{
@ -260,6 +258,15 @@ out:
return ERR_PTR(err);
}
static const struct net_device_ops cops_netdev_ops = {
.ndo_open = cops_open,
.ndo_stop = cops_close,
.ndo_start_xmit = cops_send_packet,
.ndo_tx_timeout = cops_timeout,
.ndo_do_ioctl = cops_ioctl,
.ndo_set_multicast_list = set_multicast_list,
};
/*
* This is the real probe routine. Linux has a history of friendly device
* probes on the ISA bus. A good device probe avoids doing writes, and
@ -333,16 +340,9 @@ static int __init cops_probe1(struct net_device *dev, int ioaddr)
/* Copy local board variable to lp struct. */
lp->board = board;
dev->hard_start_xmit = cops_send_packet;
dev->tx_timeout = cops_timeout;
dev->netdev_ops = &cops_netdev_ops;
dev->watchdog_timeo = HZ * 2;
dev->get_stats = cops_get_stats;
dev->open = cops_open;
dev->stop = cops_close;
dev->do_ioctl = cops_ioctl;
dev->set_multicast_list = set_multicast_list;
dev->mc_list = NULL;
/* Tell the user where the card is and what mode we're in. */
if(board==DAYNA)
@ -797,7 +797,7 @@ static void cops_rx(struct net_device *dev)
{
printk(KERN_WARNING "%s: Memory squeeze, dropping packet.\n",
dev->name);
lp->stats.rx_dropped++;
dev->stats.rx_dropped++;
while(pkt_len--) /* Discard packet */
inb(ioaddr);
spin_unlock_irqrestore(&lp->lock, flags);
@ -819,7 +819,7 @@ static void cops_rx(struct net_device *dev)
{
printk(KERN_WARNING "%s: Bad packet length of %d bytes.\n",
dev->name, pkt_len);
lp->stats.tx_errors++;
dev->stats.tx_errors++;
dev_kfree_skb_any(skb);
return;
}
@ -836,7 +836,7 @@ static void cops_rx(struct net_device *dev)
if(rsp_type != LAP_RESPONSE)
{
printk(KERN_WARNING "%s: Bad packet type %d.\n", dev->name, rsp_type);
lp->stats.tx_errors++;
dev->stats.tx_errors++;
dev_kfree_skb_any(skb);
return;
}
@ -846,8 +846,8 @@ static void cops_rx(struct net_device *dev)
skb_reset_transport_header(skb); /* Point to data (Skip header). */
/* Update the counters. */
lp->stats.rx_packets++;
lp->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
/* Send packet to a higher place. */
netif_rx(skb);
@ -858,7 +858,7 @@ static void cops_timeout(struct net_device *dev)
struct cops_local *lp = netdev_priv(dev);
int ioaddr = dev->base_addr;
lp->stats.tx_errors++;
dev->stats.tx_errors++;
if(lp->board==TANGENT)
{
if((inb(ioaddr+TANG_CARD_STATUS)&TANG_TX_READY)==0)
@ -916,8 +916,8 @@ static int cops_send_packet(struct sk_buff *skb, struct net_device *dev)
spin_unlock_irqrestore(&lp->lock, flags); /* Restore interrupts. */
/* Done sending packet, update counters and cleanup. */
lp->stats.tx_packets++;
lp->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
dev->trans_start = jiffies;
dev_kfree_skb (skb);
return 0;
@ -986,15 +986,6 @@ static int cops_close(struct net_device *dev)
return 0;
}
/*
* Get the current statistics.
* This may be called with the card open or closed.
*/
static struct net_device_stats *cops_get_stats(struct net_device *dev)
{
struct cops_local *lp = netdev_priv(dev);
return &lp->stats;
}
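
The same stats migration recurs in cops, ltpc and others: counters move from a driver-private struct into the core-owned device object, after which the trivial get_stats accessor can simply be deleted (the core falls back to the device's own counters). In miniature, with stand-in types:

#include <stdio.h>

struct net_stats {
	unsigned long rx_packets;
};

struct dev {
	struct net_stats stats;		/* core-owned, like dev->stats */
};

int main(void)
{
	struct dev d = { { 0 } };

	d.stats.rx_packets++;		/* was: lp->stats.rx_packets++ */
	printf("rx_packets=%lu\n", d.stats.rx_packets);
	return 0;
}
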
#ifdef MODULE
static struct net_device *cops_dev;


@ -261,7 +261,6 @@ static unsigned char *ltdmacbuf;
struct ltpc_private
{
struct net_device_stats stats;
struct atalk_addr my_addr;
};
@ -699,7 +698,6 @@ static int do_read(struct net_device *dev, void *cbuf, int cbuflen,
static struct timer_list ltpc_timer;
static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev);
static struct net_device_stats *ltpc_get_stats(struct net_device *dev);
static int read_30 ( struct net_device *dev)
{
@ -726,8 +724,6 @@ static int sendup_buffer (struct net_device *dev)
int dnode, snode, llaptype, len;
int sklen;
struct sk_buff *skb;
struct ltpc_private *ltpc_priv = netdev_priv(dev);
struct net_device_stats *stats = &ltpc_priv->stats;
struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;
if (ltc->command != LT_RCVLAP) {
@ -779,8 +775,8 @@ static int sendup_buffer (struct net_device *dev)
skb_reset_transport_header(skb);
stats->rx_packets++;
stats->rx_bytes+=skb->len;
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
/* toss it onwards */
netif_rx(skb);
@ -904,10 +900,6 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
/* in kernel 1.3.xx, on entry skb->data points to ddp header,
* and skb->len is the length of the ddp data + ddp header
*/
struct ltpc_private *ltpc_priv = netdev_priv(dev);
struct net_device_stats *stats = &ltpc_priv->stats;
int i;
struct lt_sendlap cbuf;
unsigned char *hdr;
@ -936,20 +928,13 @@ static int ltpc_xmit(struct sk_buff *skb, struct net_device *dev)
printk("\n");
}
stats->tx_packets++;
stats->tx_bytes+=skb->len;
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
dev_kfree_skb(skb);
return 0;
}
static struct net_device_stats *ltpc_get_stats(struct net_device *dev)
{
struct ltpc_private *ltpc_priv = netdev_priv(dev);
struct net_device_stats *stats = &ltpc_priv->stats;
return stats;
}
/* initialization stuff */
static int __init ltpc_probe_dma(int base, int dma)
@ -1027,6 +1012,12 @@ static int __init ltpc_probe_dma(int base, int dma)
return (want & 2) ? 3 : 1;
}
static const struct net_device_ops ltpc_netdev = {
.ndo_start_xmit = ltpc_xmit,
.ndo_do_ioctl = ltpc_ioctl,
.ndo_set_multicast_list = set_multicast_list,
};
struct net_device * __init ltpc_probe(void)
{
struct net_device *dev;
@ -1133,14 +1124,7 @@ struct net_device * __init ltpc_probe(void)
else
printk(KERN_INFO "Apple/Farallon LocalTalk-PC card at %03x, DMA%d. Using polled mode.\n",io,dma);
/* Fill in the fields of the device structure with ethernet-generic values. */
dev->hard_start_xmit = ltpc_xmit;
dev->get_stats = ltpc_get_stats;
/* add the ltpc-specific things */
dev->do_ioctl = &ltpc_ioctl;
dev->set_multicast_list = &set_multicast_list;
dev->netdev_ops = &ltpc_netdev;
dev->mc_list = NULL;
dev->base_addr = io;
dev->irq = irq;


@ -249,6 +249,17 @@ out:
return ERR_PTR(err);
}
static const struct net_device_ops at1700_netdev_ops = {
.ndo_open = net_open,
.ndo_stop = net_close,
.ndo_start_xmit = net_send_packet,
.ndo_set_multicast_list = set_rx_mode,
.ndo_tx_timeout = net_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
/* The Fujitsu datasheet suggests that the NIC be probed for by checking its
"signature", the default bit pattern after a reset. This *doesn't* work --
there is no way to reset the bus interface without a complete power-cycle!
@ -448,13 +459,7 @@ found:
if (net_debug)
printk(version);
memset(lp, 0, sizeof(struct net_local));
dev->open = net_open;
dev->stop = net_close;
dev->hard_start_xmit = net_send_packet;
dev->set_multicast_list = &set_rx_mode;
dev->tx_timeout = net_tx_timeout;
dev->netdev_ops = &at1700_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
spin_lock_init(&lp->lock);


@ -16,6 +16,7 @@
*/
#include "be.h"
#include <asm/div64.h>
MODULE_VERSION(DRV_VER);
MODULE_DEVICE_TABLE(pci, be_dev_ids);
@ -290,6 +291,17 @@ static struct net_device_stats *be_get_stats(struct net_device *dev)
return &adapter->stats.net_stats;
}
static u32 be_calc_rate(u64 bytes, unsigned long ticks)
{
u64 rate = bytes;
do_div(rate, ticks / HZ);
rate <<= 3; /* bytes/sec -> bits/sec */
do_div(rate, 1000000ul); /* MB/Sec */
return rate;
}
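
be_calc_rate() folds the duplicated Tx/Rx rate math into one helper; do_div() is the kernel's 64-by-32 division. A userspace sketch of the same arithmetic, with plain division standing in for do_div() and HZ assumed to be 1000:

#include <stdio.h>
#include <stdint.h>

#define HZ 1000				/* assumed tick rate */

static uint32_t calc_rate(uint64_t bytes, unsigned long ticks)
{
	uint64_t rate = bytes / (ticks / HZ);	/* bytes/sec */

	rate <<= 3;				/* -> bits/sec */
	return (uint32_t)(rate / 1000000ul);	/* -> Mbits/sec */
}

int main(void)
{
	/* 250 MB over 2 seconds -> 1000 Mbit/s */
	printf("%u Mbps\n", calc_rate(250000000ull, 2 * HZ));
	return 0;
}
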
static void be_tx_rate_update(struct be_adapter *adapter)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
@ -303,11 +315,9 @@ static void be_tx_rate_update(struct be_adapter *adapter)
/* Update tx rate once in two seconds */
if ((now - stats->be_tx_jiffies) > 2 * HZ) {
u32 r;
r = (stats->be_tx_bytes - stats->be_tx_bytes_prev) /
((now - stats->be_tx_jiffies) / HZ);
r = r / 1000000; /* M bytes/s */
stats->be_tx_rate = r * 8; /* M bits/s */
stats->be_tx_rate = be_calc_rate(stats->be_tx_bytes
- stats->be_tx_bytes_prev,
now - stats->be_tx_jiffies);
stats->be_tx_jiffies = now;
stats->be_tx_bytes_prev = stats->be_tx_bytes;
}
@ -599,7 +609,6 @@ static void be_rx_rate_update(struct be_adapter *adapter)
{
struct be_drvr_stats *stats = drvr_stats(adapter);
ulong now = jiffies;
u32 rate;
/* Wrapped around */
if (time_before(now, stats->be_rx_jiffies)) {
@ -611,10 +620,9 @@ static void be_rx_rate_update(struct be_adapter *adapter)
if ((now - stats->be_rx_jiffies) < 2 * HZ)
return;
rate = (stats->be_rx_bytes - stats->be_rx_bytes_prev) /
((now - stats->be_rx_jiffies) / HZ);
rate = rate / 1000000; /* MB/Sec */
stats->be_rx_rate = rate * 8; /* Mega Bits/Sec */
stats->be_rx_rate = be_calc_rate(stats->be_rx_bytes
- stats->be_rx_bytes_prev,
now - stats->be_rx_jiffies);
stats->be_rx_jiffies = now;
stats->be_rx_bytes_prev = stats->be_rx_bytes;
}


@ -501,6 +501,21 @@ static void net_poll_controller(struct net_device *dev)
}
#endif
static const struct net_device_ops net_ops = {
.ndo_open = net_open,
.ndo_stop = net_close,
.ndo_tx_timeout = net_timeout,
.ndo_start_xmit = net_send_packet,
.ndo_get_stats = net_get_stats,
.ndo_set_multicast_list = set_multicast_list,
.ndo_set_mac_address = set_mac_address,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = net_poll_controller,
#endif
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
};
/* This is the real probe routine. Linux has a history of friendly device
probes on the ISA bus. A good device probe avoids doing writes, and
verifies that the correct device exists and functions.
@ -843,17 +858,8 @@ cs89x0_probe1(struct net_device *dev, int ioaddr, int modular)
/* print the ethernet address. */
printk(", MAC %pM", dev->dev_addr);
dev->open = net_open;
dev->stop = net_close;
dev->tx_timeout = net_timeout;
dev->watchdog_timeo = HZ;
dev->hard_start_xmit = net_send_packet;
dev->get_stats = net_get_stats;
dev->set_multicast_list = set_multicast_list;
dev->set_mac_address = set_mac_address;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = net_poll_controller;
#endif
dev->netdev_ops = &net_ops;
dev->watchdog_timeo = HZ;
printk("\n");
if (net_debug)


@ -85,6 +85,8 @@ struct fl_pg_chunk {
struct page *page;
void *va;
unsigned int offset;
u64 *p_cnt;
DECLARE_PCI_UNMAP_ADDR(mapping);
};
struct rx_desc;
@ -101,6 +103,7 @@ struct sge_fl { /* SGE per free-buffer list state */
struct fl_pg_chunk pg_chunk;/* page chunk cache */
unsigned int use_pages; /* whether FL uses pages or sk_buffs */
unsigned int order; /* order of page allocations */
unsigned int alloc_size; /* size of allocated buffer */
struct rx_desc *desc; /* address of HW Rx descriptor ring */
struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
dma_addr_t phys_addr; /* physical address of HW ring start */
@ -291,6 +294,7 @@ void t3_os_link_fault_handler(struct adapter *adapter, int port_id);
void t3_sge_start(struct adapter *adap);
void t3_sge_stop(struct adapter *adap);
void t3_start_sge_timers(struct adapter *adap);
void t3_stop_sge_timers(struct adapter *adap);
void t3_free_sge_resources(struct adapter *adap);
void t3_sge_err_intr_handler(struct adapter *adapter);


@ -191,7 +191,8 @@ struct mdio_ops {
};
struct adapter_info {
unsigned char nports; /* # of ports */
unsigned char nports0; /* # of ports on channel 0 */
unsigned char nports1; /* # of ports on channel 1 */
unsigned char phy_base_addr; /* MDIO PHY base address */
unsigned int gpio_out; /* GPIO output settings */
unsigned char gpio_intr[MAX_NPORTS]; /* GPIO PHY IRQ pins */
@ -422,6 +423,7 @@ struct adapter_params {
unsigned short b_wnd[NCCTRL_WIN];
unsigned int nports; /* # of ethernet ports */
unsigned int chan_map; /* bitmap of in-use Tx channels */
unsigned int stats_update_period; /* MAC stats accumulation period */
unsigned int linkpoll_period; /* link poll period in 0.1s */
unsigned int rev; /* chip revision */


@ -602,7 +602,6 @@ static int setup_sge_qsets(struct adapter *adap)
&adap->params.sge.qset[qset_idx], ntxq, dev,
netdev_get_tx_queue(dev, j));
if (err) {
t3_stop_sge_timers(adap);
t3_free_sge_resources(adap);
return err;
}
@ -1046,6 +1045,8 @@ static int cxgb_up(struct adapter *adap)
setup_rss(adap);
if (!(adap->flags & NAPI_INIT))
init_napi(adap);
t3_start_sge_timers(adap);
adap->flags |= FULL_INIT_DONE;
}
@ -2870,6 +2871,9 @@ static void t3_io_resume(struct pci_dev *pdev)
{
struct adapter *adapter = pci_get_drvdata(pdev);
CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
t3_read_reg(adapter, A_PCIE_PEX_ERR));
t3_resume_ports(adapter);
}
@ -3002,7 +3006,7 @@ static int __devinit init_one(struct pci_dev *pdev,
static int version_printed;
int i, err, pci_using_dac = 0;
unsigned long mmio_start, mmio_len;
resource_size_t mmio_start, mmio_len;
const struct adapter_info *ai;
struct adapter *adapter = NULL;
struct port_info *pi;
@ -3082,7 +3086,7 @@ static int __devinit init_one(struct pci_dev *pdev,
INIT_WORK(&adapter->fatal_error_handler_task, fatal_error_task);
INIT_DELAYED_WORK(&adapter->adap_check_task, t3_adap_check_task);
for (i = 0; i < ai->nports; ++i) {
for (i = 0; i < ai->nports0 + ai->nports1; ++i) {
struct net_device *netdev;
netdev = alloc_etherdev_mq(sizeof(struct port_info), SGE_QSETS);
@ -3172,7 +3176,7 @@ static int __devinit init_one(struct pci_dev *pdev,
out_free_dev:
iounmap(adapter->regs);
for (i = ai->nports - 1; i >= 0; --i)
for (i = ai->nports0 + ai->nports1 - 1; i >= 0; --i)
if (adapter->port[i])
free_netdev(adapter->port[i]);


@ -50,6 +50,7 @@
#define SGE_RX_COPY_THRES 256
#define SGE_RX_PULL_LEN 128
#define SGE_PG_RSVD SMP_CACHE_BYTES
/*
* Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
* It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
@ -57,8 +58,10 @@
*/
#define FL0_PG_CHUNK_SIZE 2048
#define FL0_PG_ORDER 0
#define FL0_PG_ALLOC_SIZE (PAGE_SIZE << FL0_PG_ORDER)
#define FL1_PG_CHUNK_SIZE (PAGE_SIZE > 8192 ? 16384 : 8192)
#define FL1_PG_ORDER (PAGE_SIZE > 8192 ? 0 : 1)
#define FL1_PG_ALLOC_SIZE (PAGE_SIZE << FL1_PG_ORDER)
#define SGE_RX_DROP_THRES 16
#define RX_RECLAIM_PERIOD (HZ/4)
@ -345,13 +348,21 @@ static inline int should_restart_tx(const struct sge_txq *q)
return q->in_use - r < (q->size >> 1);
}
static void clear_rx_desc(const struct sge_fl *q, struct rx_sw_desc *d)
static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q,
struct rx_sw_desc *d)
{
if (q->use_pages) {
if (d->pg_chunk.page)
put_page(d->pg_chunk.page);
if (q->use_pages && d->pg_chunk.page) {
(*d->pg_chunk.p_cnt)--;
if (!*d->pg_chunk.p_cnt)
pci_unmap_page(pdev,
pci_unmap_addr(&d->pg_chunk, mapping),
q->alloc_size, PCI_DMA_FROMDEVICE);
put_page(d->pg_chunk.page);
d->pg_chunk.page = NULL;
} else {
pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
q->buf_size, PCI_DMA_FROMDEVICE);
kfree_skb(d->skb);
d->skb = NULL;
}
@ -372,9 +383,8 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
while (q->credits--) {
struct rx_sw_desc *d = &q->sdesc[cidx];
pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
q->buf_size, PCI_DMA_FROMDEVICE);
clear_rx_desc(q, d);
clear_rx_desc(pdev, q, d);
if (++cidx == q->size)
cidx = 0;
}
@ -417,18 +427,39 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
return 0;
}
static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
static inline int add_one_rx_chunk(dma_addr_t mapping, struct rx_desc *d,
unsigned int gen)
{
d->addr_lo = cpu_to_be32(mapping);
d->addr_hi = cpu_to_be32((u64) mapping >> 32);
wmb();
d->len_gen = cpu_to_be32(V_FLD_GEN1(gen));
d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
return 0;
}
static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q,
struct rx_sw_desc *sd, gfp_t gfp,
unsigned int order)
{
if (!q->pg_chunk.page) {
dma_addr_t mapping;
q->pg_chunk.page = alloc_pages(gfp, order);
if (unlikely(!q->pg_chunk.page))
return -ENOMEM;
q->pg_chunk.va = page_address(q->pg_chunk.page);
q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) -
SGE_PG_RSVD;
q->pg_chunk.offset = 0;
mapping = pci_map_page(adapter->pdev, q->pg_chunk.page,
0, q->alloc_size, PCI_DMA_FROMDEVICE);
pci_unmap_addr_set(&q->pg_chunk, mapping, mapping);
}
sd->pg_chunk = q->pg_chunk;
prefetch(sd->pg_chunk.p_cnt);
q->pg_chunk.offset += q->buf_size;
if (q->pg_chunk.offset == (PAGE_SIZE << order))
q->pg_chunk.page = NULL;
@ -436,6 +467,12 @@ static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp,
q->pg_chunk.va += q->buf_size;
get_page(q->pg_chunk.page);
}
if (sd->pg_chunk.offset == 0)
*sd->pg_chunk.p_cnt = 1;
else
*sd->pg_chunk.p_cnt += 1;
return 0;
}
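
The free-list rework above maps each page once and parks a reference count in the page's reserved tail (SGE_PG_RSVD), so the DMA mapping is torn down only when the last chunk carved from that page is freed. A minimal userspace model of that pattern (the PCI mapping itself is reduced to a printf; usable bytes per chunk would exclude the reserved tail):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ	4096
#define CHUNK	2048
#define RSVD	64		/* stand-in for SGE_PG_RSVD */

struct chunk {
	void *va;
	unsigned long *p_cnt;	/* shared counter in the page tail */
};

static void get_chunk(struct chunk *c, void *page, int idx)
{
	c->va = (char *)page + idx * CHUNK;
	c->p_cnt = (unsigned long *)((char *)page + PAGE_SZ - RSVD);
	if (idx == 0)
		*c->p_cnt = 1;	/* first chunk initializes the count */
	else
		(*c->p_cnt)++;
}

static void put_chunk(struct chunk *c)
{
	if (--(*c->p_cnt) == 0)
		printf("last reference gone: unmap the page\n");
}

int main(void)
{
	void *page = calloc(1, PAGE_SZ);
	struct chunk a, b;

	get_chunk(&a, page, 0);
	get_chunk(&b, page, 1);
	put_chunk(&a);		/* page stays mapped */
	put_chunk(&b);		/* count hits zero: unmap */
	free(page);
	return 0;
}
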
@ -460,35 +497,43 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
*/
static int refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
{
void *buf_start;
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
struct rx_desc *d = &q->desc[q->pidx];
unsigned int count = 0;
while (n--) {
dma_addr_t mapping;
int err;
if (q->use_pages) {
if (unlikely(alloc_pg_chunk(q, sd, gfp, q->order))) {
if (unlikely(alloc_pg_chunk(adap, q, sd, gfp,
q->order))) {
nomem: q->alloc_failed++;
break;
}
buf_start = sd->pg_chunk.va;
} else {
struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
mapping = pci_unmap_addr(&sd->pg_chunk, mapping) +
sd->pg_chunk.offset;
pci_unmap_addr_set(sd, dma_addr, mapping);
add_one_rx_chunk(mapping, d, q->gen);
pci_dma_sync_single_for_device(adap->pdev, mapping,
q->buf_size - SGE_PG_RSVD,
PCI_DMA_FROMDEVICE);
} else {
void *buf_start;
struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
if (!skb)
goto nomem;
sd->skb = skb;
buf_start = skb->data;
}
err = add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
adap->pdev);
if (unlikely(err)) {
clear_rx_desc(q, sd);
break;
err = add_one_rx_buf(buf_start, q->buf_size, d, sd,
q->gen, adap->pdev);
if (unlikely(err)) {
clear_rx_desc(adap->pdev, q, sd);
break;
}
}
d++;
@ -795,19 +840,19 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
struct sk_buff *newskb, *skb;
struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
newskb = skb = q->pg_skb;
dma_addr_t dma_addr = pci_unmap_addr(sd, dma_addr);
newskb = skb = q->pg_skb;
if (!skb && (len <= SGE_RX_COPY_THRES)) {
newskb = alloc_skb(len, GFP_ATOMIC);
if (likely(newskb != NULL)) {
__skb_put(newskb, len);
pci_dma_sync_single_for_cpu(adap->pdev,
pci_unmap_addr(sd, dma_addr), len,
pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
PCI_DMA_FROMDEVICE);
memcpy(newskb->data, sd->pg_chunk.va, len);
pci_dma_sync_single_for_device(adap->pdev,
pci_unmap_addr(sd, dma_addr), len,
PCI_DMA_FROMDEVICE);
pci_dma_sync_single_for_device(adap->pdev, dma_addr,
len,
PCI_DMA_FROMDEVICE);
} else if (!drop_thres)
return NULL;
recycle:
@ -820,16 +865,25 @@ recycle:
if (unlikely(q->rx_recycle_buf || (!skb && fl->credits <= drop_thres)))
goto recycle;
prefetch(sd->pg_chunk.p_cnt);
if (!skb)
newskb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
if (unlikely(!newskb)) {
if (!drop_thres)
return NULL;
goto recycle;
}
pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
fl->buf_size, PCI_DMA_FROMDEVICE);
pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
PCI_DMA_FROMDEVICE);
(*sd->pg_chunk.p_cnt)--;
if (!*sd->pg_chunk.p_cnt)
pci_unmap_page(adap->pdev,
pci_unmap_addr(&sd->pg_chunk, mapping),
fl->alloc_size,
PCI_DMA_FROMDEVICE);
if (!skb) {
__skb_put(newskb, SGE_RX_PULL_LEN);
memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
@ -1089,7 +1143,7 @@ static void write_tx_pkt_wr(struct adapter *adap, struct sk_buff *skb,
struct tx_desc *d = &q->desc[pidx];
struct cpl_tx_pkt *cpl = (struct cpl_tx_pkt *)d;
cpl->len = htonl(skb->len | 0x80000000);
cpl->len = htonl(skb->len);
cntrl = V_TXPKT_INTF(pi->port_id);
if (vlan_tx_tag_present(skb) && pi->vlan_grp)
@ -1958,8 +2012,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
skb_pull(skb, sizeof(*p) + pad);
skb->protocol = eth_type_trans(skb, adap->port[p->iff]);
pi = netdev_priv(skb->dev);
if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid && p->csum == htons(0xffff) &&
!p->fragment) {
if ((pi->rx_offload & T3_RX_CSUM) && p->csum_valid &&
p->csum == htons(0xffff) && !p->fragment) {
qs->port_stats[SGE_PSTAT_RX_CSUM_GOOD]++;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else
@ -2034,10 +2088,19 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
fl->credits--;
len -= offset;
pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
fl->buf_size, PCI_DMA_FROMDEVICE);
pci_dma_sync_single_for_cpu(adap->pdev,
pci_unmap_addr(sd, dma_addr),
fl->buf_size - SGE_PG_RSVD,
PCI_DMA_FROMDEVICE);
prefetch(&qs->lro_frag_tbl);
(*sd->pg_chunk.p_cnt)--;
if (!*sd->pg_chunk.p_cnt)
pci_unmap_page(adap->pdev,
pci_unmap_addr(&sd->pg_chunk, mapping),
fl->alloc_size,
PCI_DMA_FROMDEVICE);
prefetch(qs->lro_va);
rx_frag += nr_frags;
rx_frag->page = sd->pg_chunk.page;
@ -2047,6 +2110,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
qs->lro_frag_tbl.nr_frags++;
qs->lro_frag_tbl.len = frag_len;
if (!complete)
return;
@ -2236,6 +2300,8 @@ no_mem:
if (fl->use_pages) {
void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
prefetch(&qs->lro_frag_tbl);
prefetch(addr);
#if L1_CACHE_BYTES < 128
prefetch(addr + L1_CACHE_BYTES);
@ -2972,21 +3038,23 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
q->fl[1].use_pages = FL1_PG_CHUNK_SIZE > 0;
q->fl[0].order = FL0_PG_ORDER;
q->fl[1].order = FL1_PG_ORDER;
q->fl[0].alloc_size = FL0_PG_ALLOC_SIZE;
q->fl[1].alloc_size = FL1_PG_ALLOC_SIZE;
spin_lock_irq(&adapter->sge.reg_lock);
/* FL threshold comparison uses < */
ret = t3_sge_init_rspcntxt(adapter, q->rspq.cntxt_id, irq_vec_idx,
q->rspq.phys_addr, q->rspq.size,
q->fl[0].buf_size, 1, 0);
q->fl[0].buf_size - SGE_PG_RSVD, 1, 0);
if (ret)
goto err_unlock;
for (i = 0; i < SGE_RXQ_PER_SET; ++i) {
ret = t3_sge_init_flcntxt(adapter, q->fl[i].cntxt_id, 0,
q->fl[i].phys_addr, q->fl[i].size,
q->fl[i].buf_size, p->cong_thres, 1,
0);
q->fl[i].buf_size - SGE_PG_RSVD,
p->cong_thres, 1, 0);
if (ret)
goto err_unlock;
}
@ -3044,9 +3112,6 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
t3_write_reg(adapter, A_SG_GTS, V_RSPQ(q->rspq.cntxt_id) |
V_NEWTIMER(q->rspq.holdoff_tmr));
mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
return 0;
err_unlock:
@ -3056,6 +3121,27 @@ err:
return ret;
}
/**
* t3_start_sge_timers - start SGE timer callbacks
* @adap: the adapter
*
* Starts each SGE queue set's timer callbacks
*/
void t3_start_sge_timers(struct adapter *adap)
{
int i;
for (i = 0; i < SGE_QSETS; ++i) {
struct sge_qset *q = &adap->sge.qs[i];
if (q->tx_reclaim_timer.function)
mod_timer(&q->tx_reclaim_timer, jiffies + TX_RECLAIM_PERIOD);
if (q->rx_reclaim_timer.function)
mod_timer(&q->rx_reclaim_timer, jiffies + RX_RECLAIM_PERIOD);
}
}
/**
* t3_stop_sge_timers - stop SGE timer callbacks
* @adap: the adapter

View file

@ -493,20 +493,20 @@ int t3_phy_lasi_intr_handler(struct cphy *phy)
}
static const struct adapter_info t3_adap_info[] = {
{2, 0,
{1, 1, 0,
F_GPIO2_OEN | F_GPIO4_OEN |
F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
&mi1_mdio_ops, "Chelsio PE9000"},
{2, 0,
{1, 1, 0,
F_GPIO2_OEN | F_GPIO4_OEN |
F_GPIO2_OUT_VAL | F_GPIO4_OUT_VAL, { S_GPIO3, S_GPIO5 }, 0,
&mi1_mdio_ops, "Chelsio T302"},
{1, 0,
{1, 0, 0,
F_GPIO1_OEN | F_GPIO6_OEN | F_GPIO7_OEN | F_GPIO10_OEN |
F_GPIO11_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
{ 0 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
&mi1_mdio_ext_ops, "Chelsio T310"},
{2, 0,
{1, 1, 0,
F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO5_OEN | F_GPIO6_OEN |
F_GPIO7_OEN | F_GPIO10_OEN | F_GPIO11_OEN | F_GPIO1_OUT_VAL |
F_GPIO5_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
@ -514,7 +514,7 @@ static const struct adapter_info t3_adap_info[] = {
&mi1_mdio_ext_ops, "Chelsio T320"},
{},
{},
{1, 0,
{1, 0, 0,
F_GPIO1_OEN | F_GPIO2_OEN | F_GPIO4_OEN | F_GPIO6_OEN | F_GPIO7_OEN |
F_GPIO10_OEN | F_GPIO1_OUT_VAL | F_GPIO6_OUT_VAL | F_GPIO10_OUT_VAL,
{ S_GPIO9 }, SUPPORTED_10000baseT_Full | SUPPORTED_AUI,
@ -2128,16 +2128,40 @@ void t3_port_intr_clear(struct adapter *adapter, int idx)
static int t3_sge_write_context(struct adapter *adapter, unsigned int id,
unsigned int type)
{
t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
if (type == F_RESPONSEQ) {
/*
* Can't write the Response Queue Context bits for
* Interrupt Armed or the Reserve bits after the chip
* has been initialized out of reset. Writing to these
* bits can confuse the hardware.
*/
t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0x17ffffff);
t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
} else {
t3_write_reg(adapter, A_SG_CONTEXT_MASK0, 0xffffffff);
t3_write_reg(adapter, A_SG_CONTEXT_MASK1, 0xffffffff);
t3_write_reg(adapter, A_SG_CONTEXT_MASK2, 0xffffffff);
t3_write_reg(adapter, A_SG_CONTEXT_MASK3, 0xffffffff);
}
t3_write_reg(adapter, A_SG_CONTEXT_CMD,
V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
return t3_wait_op_done(adapter, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
* clear_sge_ctxt - completely clear an SGE context
* @adapter: the adapter
* @id: the context id
* @type: the context type
*
* Completely clear an SGE context. Used predominantly at post-reset
* initialization. Note in particular that we don't skip writing to any
* "sensitive bits" in the contexts the way that t3_sge_write_context()
* does ...
*/
static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
unsigned int type)
{
@ -2145,7 +2169,14 @@ static int clear_sge_ctxt(struct adapter *adap, unsigned int id,
t3_write_reg(adap, A_SG_CONTEXT_DATA1, 0);
t3_write_reg(adap, A_SG_CONTEXT_DATA2, 0);
t3_write_reg(adap, A_SG_CONTEXT_DATA3, 0);
return t3_sge_write_context(adap, id, type);
t3_write_reg(adap, A_SG_CONTEXT_MASK0, 0xffffffff);
t3_write_reg(adap, A_SG_CONTEXT_MASK1, 0xffffffff);
t3_write_reg(adap, A_SG_CONTEXT_MASK2, 0xffffffff);
t3_write_reg(adap, A_SG_CONTEXT_MASK3, 0xffffffff);
t3_write_reg(adap, A_SG_CONTEXT_CMD,
V_CONTEXT_CMD_OPCODE(1) | type | V_CONTEXT(id));
return t3_wait_op_done(adap, A_SG_CONTEXT_CMD, F_CONTEXT_CMD_BUSY,
0, SG_CONTEXT_CMD_ATTEMPTS, 1);
}
/**
@ -2729,10 +2760,10 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
F_MTUENABLE | V_WINDOWSCALEMODE(1) |
V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1));
t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
V_BYTETHRESHOLD(26880) | V_MSSTHRESHOLD(2) |
F_AUTOCAREFUL | F_AUTOENABLE | V_DACK_MODE(1));
t3_set_reg_field(adap, A_TP_IN_CONFIG, F_RXFBARBPRIO | F_TXFBARBPRIO,
F_IPV6ENABLE | F_NICMODE);
@ -3196,20 +3227,22 @@ int t3_mps_set_active_ports(struct adapter *adap, unsigned int port_mask)
}
/*
* Perform the bits of HW initialization that are dependent on the number
* of available ports.
* Perform the bits of HW initialization that are dependent on the Tx
* channels being used.
*/
static void init_hw_for_avail_ports(struct adapter *adap, int nports)
static void chan_init_hw(struct adapter *adap, unsigned int chan_map)
{
int i;
if (nports == 1) {
if (chan_map != 3) { /* one channel */
t3_set_reg_field(adap, A_ULPRX_CTL, F_ROUND_ROBIN, 0);
t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
F_PORT0ACTIVE | F_ENFORCEPKT);
t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
} else {
t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_ENFORCEPKT |
(chan_map == 1 ? F_TPTXPORT0EN | F_PORT0ACTIVE :
F_TPTXPORT1EN | F_PORT1ACTIVE));
t3_write_reg(adap, A_PM1_TX_CFG,
chan_map == 1 ? 0xffffffff : 0);
} else { /* two channels */
t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
t3_write_reg(adap, A_ULPTX_DMA_WEIGHT,
@ -3517,7 +3550,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
t3_write_reg(adapter, A_PM1_RX_MODE, 0);
t3_write_reg(adapter, A_PM1_TX_MODE, 0);
init_hw_for_avail_ports(adapter, adapter->params.nports);
chan_init_hw(adapter, adapter->params.chan_map);
t3_sge_init(adapter, &adapter->params.sge);
t3_write_reg(adapter, A_T3DBG_GPIO_ACT_LOW, calc_gpio_intr(adapter));
@ -3754,7 +3787,8 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
get_pci_mode(adapter, &adapter->params.pci);
adapter->params.info = ai;
adapter->params.nports = ai->nports;
adapter->params.nports = ai->nports0 + ai->nports1;
adapter->params.chan_map = !!ai->nports0 | (!!ai->nports1 << 1);
adapter->params.rev = t3_read_reg(adapter, A_PL_REV);
/*
* We used to only run the "adapter check task" once a second if
@ -3785,7 +3819,7 @@ int t3_prep_adapter(struct adapter *adapter, const struct adapter_info *ai,
mc7_prep(adapter, &adapter->pmtx, MC7_PMTX_BASE_ADDR, "PMTX");
mc7_prep(adapter, &adapter->cm, MC7_CM_BASE_ADDR, "CM");
p->nchan = ai->nports;
p->nchan = adapter->params.chan_map == 3 ? 2 : 1;
p->pmrx_size = t3_mc7_size(&adapter->pmrx);
p->pmtx_size = t3_mc7_size(&adapter->pmtx);
p->cm_size = t3_mc7_size(&adapter->cm);
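
The channel map introduced here is a two-bit mask rather than a port count: bit 0 is set when the adapter has ports behind Tx channel 0 (nports0 above), bit 1 when it has ports behind channel 1 (nports1), so chan_map == 3 means both channels are in use. A quick standalone check of the encoding and the derived channel count, mirroring the two expressions above (plain C, sample values only):

#include <stdio.h>

int main(void)
{
    /* (nports0, nports1) pairs like the t3_adap_info[] entries above */
    int configs[][2] = { {1, 1}, {1, 0}, {0, 1} };

    for (int i = 0; i < 3; i++) {
        int nports0 = configs[i][0], nports1 = configs[i][1];
        /* as in t3_prep_adapter() */
        int chan_map = !!nports0 | (!!nports1 << 1);
        /* as in the MC7 sizing code */
        int nchan = chan_map == 3 ? 2 : 1;

        printf("nports0=%d nports1=%d -> chan_map=%d nchan=%d\n",
               nports0, nports1, chan_map, nchan);
    }
    return 0;
}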

View file

@ -566,6 +566,18 @@ MODULE_LICENSE("GPL");
outw(CSR0, DEPCA_ADDR);\
outw(STOP, DEPCA_DATA)
static const struct net_device_ops depca_netdev_ops = {
.ndo_open = depca_open,
.ndo_start_xmit = depca_start_xmit,
.ndo_stop = depca_close,
.ndo_set_multicast_list = set_multicast_list,
.ndo_do_ioctl = depca_ioctl,
.ndo_tx_timeout = depca_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int __init depca_hw_init (struct net_device *dev, struct device *device)
{
struct depca_private *lp;
@ -793,12 +805,7 @@ static int __init depca_hw_init (struct net_device *dev, struct device *device)
}
/* The DEPCA-specific entries in the device structure. */
dev->open = &depca_open;
dev->hard_start_xmit = &depca_start_xmit;
dev->stop = &depca_close;
dev->set_multicast_list = &set_multicast_list;
dev->do_ioctl = &depca_ioctl;
dev->tx_timeout = depca_tx_timeout;
dev->netdev_ops = &depca_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
dev->mem_start = 0;
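
This depca hunk is the template for most of the conversions that follow (eepro, eexpress, eth16i, ewrk3, ibmlana, lance, lp486e, ni52, ni65, seeq8005, smc9194, and others): the old writable per-device hooks are collected into one const struct net_device_ops shared by every instance, and the probe routine shrinks to a single pointer assignment. Schematically (not a compilable unit; the foo_* names are placeholders):

/* Before: probe patched half a dozen writable fields on net_device. */
dev->open               = foo_open;
dev->stop               = foo_close;
dev->hard_start_xmit    = foo_start_xmit;
dev->set_multicast_list = foo_set_multicast;
dev->tx_timeout         = foo_tx_timeout;

/* After: one const ops table, shared and write-protected, plus the
 * generic Ethernet helpers (eth_change_mtu and friends) most of these
 * drivers pick up in passing. */
static const struct net_device_ops foo_netdev_ops = {
    .ndo_open               = foo_open,
    .ndo_stop               = foo_close,
    .ndo_start_xmit         = foo_start_xmit,
    .ndo_set_multicast_list = foo_set_multicast,
    .ndo_tx_timeout         = foo_tx_timeout,
    .ndo_change_mtu         = eth_change_mtu,
    .ndo_set_mac_address    = eth_mac_addr,
    .ndo_validate_addr      = eth_validate_addr,
};

dev->netdev_ops = &foo_netdev_ops;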

View file

@ -739,6 +739,17 @@ static void __init eepro_print_info (struct net_device *dev)
static const struct ethtool_ops eepro_ethtool_ops;
static const struct net_device_ops eepro_netdev_ops = {
.ndo_open = eepro_open,
.ndo_stop = eepro_close,
.ndo_start_xmit = eepro_send_packet,
.ndo_set_multicast_list = set_multicast_list,
.ndo_tx_timeout = eepro_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
/* This is the real probe routine. Linux has a history of friendly device
probes on the ISA bus. A good device probe avoids doing writes, and
verifies that the correct device exists and functions. */
@ -851,11 +862,7 @@ static int __init eepro_probe1(struct net_device *dev, int autoprobe)
}
}
dev->open = eepro_open;
dev->stop = eepro_close;
dev->hard_start_xmit = eepro_send_packet;
dev->set_multicast_list = &set_multicast_list;
dev->tx_timeout = eepro_tx_timeout;
dev->netdev_ops = &eepro_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
dev->ethtool_ops = &eepro_ethtool_ops;

View file

@ -1043,6 +1043,17 @@ static void eexp_hw_tx_pio(struct net_device *dev, unsigned short *buf,
lp->last_tx = jiffies;
}
static const struct net_device_ops eexp_netdev_ops = {
.ndo_open = eexp_open,
.ndo_stop = eexp_close,
.ndo_start_xmit = eexp_xmit,
.ndo_set_multicast_list = eexp_set_multicast,
.ndo_tx_timeout = eexp_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
/*
* Sanity check the suspected EtherExpress card
* Read hardware address, reset card, size memory and initialize buffer
@ -1163,11 +1174,7 @@ static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr)
lp->rx_buf_start = TX_BUF_START + (lp->num_tx_bufs*TX_BUF_SIZE);
lp->width = buswidth;
dev->open = eexp_open;
dev->stop = eexp_close;
dev->hard_start_xmit = eexp_xmit;
dev->set_multicast_list = &eexp_set_multicast;
dev->tx_timeout = eexp_timeout;
dev->netdev_ops = &eexp_netdev_ops;
dev->watchdog_timeo = 2*HZ;
return register_netdev(dev);

View file

@ -475,6 +475,17 @@ out:
}
#endif
static const struct net_device_ops eth16i_netdev_ops = {
.ndo_open = eth16i_open,
.ndo_stop = eth16i_close,
.ndo_start_xmit = eth16i_tx,
.ndo_set_multicast_list = eth16i_multicast,
.ndo_tx_timeout = eth16i_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int __init eth16i_probe1(struct net_device *dev, int ioaddr)
{
struct eth16i_local *lp = netdev_priv(dev);
@ -549,12 +560,7 @@ static int __init eth16i_probe1(struct net_device *dev, int ioaddr)
BITCLR(ioaddr + CONFIG_REG_1, POWERUP);
/* Initialize the device structure */
memset(lp, 0, sizeof(struct eth16i_local));
dev->open = eth16i_open;
dev->stop = eth16i_close;
dev->hard_start_xmit = eth16i_tx;
dev->set_multicast_list = eth16i_multicast;
dev->tx_timeout = eth16i_timeout;
dev->netdev_ops = &eth16i_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
spin_lock_init(&lp->lock);

drivers/net/ethoc.c: new file (1112 lines added)

Diff not shown because of its size.

View file

@ -388,6 +388,18 @@ static int __init ewrk3_probe1(struct net_device *dev, u_long iobase, int irq)
return err;
}
static const struct net_device_ops ewrk3_netdev_ops = {
.ndo_open = ewrk3_open,
.ndo_start_xmit = ewrk3_queue_pkt,
.ndo_stop = ewrk3_close,
.ndo_set_multicast_list = set_multicast_list,
.ndo_do_ioctl = ewrk3_ioctl,
.ndo_tx_timeout = ewrk3_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int __init
ewrk3_hw_init(struct net_device *dev, u_long iobase)
{
@ -603,16 +615,11 @@ ewrk3_hw_init(struct net_device *dev, u_long iobase)
printk(version);
}
/* The EWRK3-specific entries in the device structure. */
dev->open = ewrk3_open;
dev->hard_start_xmit = ewrk3_queue_pkt;
dev->stop = ewrk3_close;
dev->set_multicast_list = set_multicast_list;
dev->do_ioctl = ewrk3_ioctl;
dev->netdev_ops = &ewrk3_netdev_ops;
if (lp->adapter_name[4] == '3')
SET_ETHTOOL_OPS(dev, &ethtool_ops_203);
else
SET_ETHTOOL_OPS(dev, &ethtool_ops);
dev->tx_timeout = ewrk3_timeout;
dev->watchdog_timeo = QUEUE_PKT_TIMEOUT;
dev->mem_start = 0;

View file

@ -1239,19 +1239,9 @@ static int gfar_enet_open(struct net_device *dev)
return err;
}
static inline struct txfcb *gfar_add_fcb(struct sk_buff **skbp)
static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
struct txfcb *fcb;
struct sk_buff *skb = *skbp;
if (unlikely(skb_headroom(skb) < GMAC_FCB_LEN)) {
struct sk_buff *old_skb = skb;
skb = skb_realloc_headroom(old_skb, GMAC_FCB_LEN);
if (!skb)
return NULL;
dev_kfree_skb_any(old_skb);
}
fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
struct txfcb *fcb = (struct txfcb *)skb_push(skb, GMAC_FCB_LEN);
cacheable_memzero(fcb, GMAC_FCB_LEN);
return fcb;
@ -1320,6 +1310,22 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
base = priv->tx_bd_base;
/* make space for additional header when fcb is needed */
if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
(priv->vlgrp && vlan_tx_tag_present(skb))) &&
(skb_headroom(skb) < GMAC_FCB_LEN)) {
struct sk_buff *skb_new;
skb_new = skb_realloc_headroom(skb, GMAC_FCB_LEN);
if (!skb_new) {
dev->stats.tx_errors++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
kfree_skb(skb);
skb = skb_new;
}
/* total number of fragments in the SKB */
nr_frags = skb_shinfo(skb)->nr_frags;
@ -1372,20 +1378,18 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Set up checksumming */
if (CHECKSUM_PARTIAL == skb->ip_summed) {
fcb = gfar_add_fcb(&skb);
if (likely(fcb != NULL)) {
lstatus |= BD_LFLAG(TXBD_TOE);
gfar_tx_checksum(skb, fcb);
}
fcb = gfar_add_fcb(skb);
lstatus |= BD_LFLAG(TXBD_TOE);
gfar_tx_checksum(skb, fcb);
}
if (priv->vlgrp && vlan_tx_tag_present(skb)) {
if (unlikely(NULL == fcb))
fcb = gfar_add_fcb(&skb);
if (likely(fcb != NULL)) {
if (unlikely(NULL == fcb)) {
fcb = gfar_add_fcb(skb);
lstatus |= BD_LFLAG(TXBD_TOE);
gfar_tx_vlan(skb, fcb);
}
gfar_tx_vlan(skb, fcb);
}
/* setup the TxBD length and buffer pointer for the first BD */
@ -1433,7 +1437,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* Unlock priv */
spin_unlock_irqrestore(&priv->txlock, flags);
return 0;
return NETDEV_TX_OK;
}
/* Stops the kernel queue, and halts the controller */
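
Two things change shape in this gianfar hunk. First, headroom for the frame control block is now guaranteed once, up front in gfar_start_xmit() (and only when checksum offload or VLAN tagging actually needs an FCB), so gfar_add_fcb() can skb_push() unconditionally and loses its failure path. Second, the xmit routine returns the explicit NETDEV_TX_OK instead of a bare 0. A self-contained toy of the reserve-then-push discipline (plain C; sizes and names are illustrative):

#include <stdio.h>
#include <string.h>

#define FCB_LEN 8   /* stand-in for GMAC_FCB_LEN */

/* Toy skb: data starts at buf + head and push() grows it downward
 * into the headroom, like skb_push(). */
struct toy_skb {
    char buf[64];
    int head;
};

static char *push(struct toy_skb *s, int len)
{
    s->head -= len;     /* caller has already guaranteed room */
    return s->buf + s->head;
}

int main(void)
{
    struct toy_skb skb = { .head = 4 };     /* too little headroom */

    /* Checked once in the xmit path, as gfar_start_xmit() now does... */
    if (skb.head < FCB_LEN) {
        printf("reallocating to get %d bytes of headroom\n", FCB_LEN);
        skb.head = FCB_LEN;     /* stand-in for skb_realloc_headroom() */
    }

    /* ...so the helper can push unconditionally, like gfar_add_fcb(). */
    char *fcb = push(&skb, FCB_LEN);
    memset(fcb, 0, FCB_LEN);    /* cacheable_memzero() in the driver */
    printf("fcb at offset %d, headroom left %d\n",
           (int)(fcb - skb.buf), skb.head);
    return 0;
}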

View file

@ -905,6 +905,17 @@ static char *ibmlana_adapter_names[] __devinitdata = {
NULL
};
static const struct net_device_ops ibmlana_netdev_ops = {
.ndo_open = ibmlana_open,
.ndo_stop = ibmlana_close,
.ndo_start_xmit = ibmlana_tx,
.ndo_set_multicast_list = ibmlana_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int __devinit ibmlana_init_one(struct device *kdev)
{
struct mca_device *mdev = to_mca_device(kdev);
@ -973,11 +984,7 @@ static int __devinit ibmlana_init_one(struct device *kdev)
mca_device_set_claim(mdev, 1);
/* set methods */
dev->open = ibmlana_open;
dev->stop = ibmlana_close;
dev->hard_start_xmit = ibmlana_tx;
dev->set_multicast_list = ibmlana_set_multicast_list;
dev->netdev_ops = &ibmlana_netdev_ops;
dev->flags |= IFF_MULTICAST;
/* copy out MAC address */

View file

@ -1524,6 +1524,13 @@ toshoboe_close (struct pci_dev *pci_dev)
free_netdev(self->netdev);
}
static const struct net_device_ops toshoboe_netdev_ops = {
.ndo_open = toshoboe_net_open,
.ndo_stop = toshoboe_net_close,
.ndo_start_xmit = toshoboe_hard_xmit,
.ndo_do_ioctl = toshoboe_net_ioctl,
};
static int
toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
{
@ -1657,10 +1664,7 @@ toshoboe_open (struct pci_dev *pci_dev, const struct pci_device_id *pdid)
#endif
SET_NETDEV_DEV(dev, &pci_dev->dev);
dev->hard_start_xmit = toshoboe_hard_xmit;
dev->open = toshoboe_net_open;
dev->stop = toshoboe_net_close;
dev->do_ioctl = toshoboe_net_ioctl;
dev->netdev_ops = &toshoboe_netdev_ops;
err = register_netdev(dev);
if (err)

View file

@ -454,6 +454,18 @@ out:
}
#endif
static const struct net_device_ops lance_netdev_ops = {
.ndo_open = lance_open,
.ndo_start_xmit = lance_start_xmit,
.ndo_stop = lance_close,
.ndo_get_stats = lance_get_stats,
.ndo_set_multicast_list = set_multicast_list,
.ndo_tx_timeout = lance_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int options)
{
struct lance_private *lp;
@ -714,12 +726,7 @@ static int __init lance_probe1(struct net_device *dev, int ioaddr, int irq, int
printk(version);
/* The LANCE-specific entries in the device structure. */
dev->open = lance_open;
dev->hard_start_xmit = lance_start_xmit;
dev->stop = lance_close;
dev->get_stats = lance_get_stats;
dev->set_multicast_list = set_multicast_list;
dev->tx_timeout = lance_tx_timeout;
dev->netdev_ops = &lance_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
err = register_netdev(dev);

View file

@ -952,6 +952,17 @@ static void print_eth(char *add)
(unsigned char) add[12], (unsigned char) add[13]);
}
static const struct net_device_ops i596_netdev_ops = {
.ndo_open = i596_open,
.ndo_stop = i596_close,
.ndo_start_xmit = i596_start_xmit,
.ndo_set_multicast_list = set_multicast_list,
.ndo_tx_timeout = i596_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int __init lp486e_probe(struct net_device *dev) {
struct i596_private *lp;
unsigned char eth_addr[6] = { 0, 0xaa, 0, 0, 0, 0 };
@ -1014,12 +1025,8 @@ static int __init lp486e_probe(struct net_device *dev) {
printk("\n");
/* The LP486E-specific entries in the device structure. */
dev->open = &i596_open;
dev->stop = &i596_close;
dev->hard_start_xmit = &i596_start_xmit;
dev->set_multicast_list = &set_multicast_list;
dev->netdev_ops = &i596_netdev_ops;
dev->watchdog_timeo = 5*HZ;
dev->tx_timeout = i596_tx_timeout;
#if 0
/* selftest reports 0x320925ae - don't know what that means */

View file

@ -441,6 +441,18 @@ out:
return ERR_PTR(err);
}
static const struct net_device_ops ni52_netdev_ops = {
.ndo_open = ni52_open,
.ndo_stop = ni52_close,
.ndo_get_stats = ni52_get_stats,
.ndo_tx_timeout = ni52_timeout,
.ndo_start_xmit = ni52_send_packet,
.ndo_set_multicast_list = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int __init ni52_probe1(struct net_device *dev, int ioaddr)
{
int i, size, retval;
@ -561,15 +573,8 @@ static int __init ni52_probe1(struct net_device *dev, int ioaddr)
printk("IRQ %d (assigned and not checked!).\n", dev->irq);
}
dev->open = ni52_open;
dev->stop = ni52_close;
dev->get_stats = ni52_get_stats;
dev->tx_timeout = ni52_timeout;
dev->netdev_ops = &ni52_netdev_ops;
dev->watchdog_timeo = HZ/20;
dev->hard_start_xmit = ni52_send_packet;
dev->set_multicast_list = set_multicast_list;
dev->if_port = 0;
return 0;
out:

View file

@ -237,7 +237,7 @@ struct priv
void *tmdbounce[TMDNUM];
int tmdbouncenum;
int lock,xmit_queued;
struct net_device_stats stats;
void *self;
int cmdr_addr;
int cardno;
@ -257,7 +257,6 @@ static void ni65_timeout(struct net_device *dev);
static int ni65_close(struct net_device *dev);
static int ni65_alloc_buffer(struct net_device *dev);
static void ni65_free_buffer(struct priv *p);
static struct net_device_stats *ni65_get_stats(struct net_device *);
static void set_multicast_list(struct net_device *dev);
static int irqtab[] __initdata = { 9,12,15,5 }; /* irq config-translate */
@ -401,6 +400,17 @@ out:
return ERR_PTR(err);
}
static const struct net_device_ops ni65_netdev_ops = {
.ndo_open = ni65_open,
.ndo_stop = ni65_close,
.ndo_start_xmit = ni65_send_packet,
.ndo_tx_timeout = ni65_timeout,
.ndo_set_multicast_list = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
/*
* this is the real card probe ..
*/
@ -549,13 +559,9 @@ static int __init ni65_probe1(struct net_device *dev,int ioaddr)
}
dev->base_addr = ioaddr;
dev->open = ni65_open;
dev->stop = ni65_close;
dev->hard_start_xmit = ni65_send_packet;
dev->tx_timeout = ni65_timeout;
dev->netdev_ops = &ni65_netdev_ops;
dev->watchdog_timeo = HZ/2;
dev->get_stats = ni65_get_stats;
dev->set_multicast_list = set_multicast_list;
return 0; /* everything is OK */
}
@ -901,13 +907,13 @@ static irqreturn_t ni65_interrupt(int irq, void * dev_id)
if(debuglevel > 1)
printk(KERN_ERR "%s: general error: %04x.\n",dev->name,csr0);
if(csr0 & CSR0_BABL)
p->stats.tx_errors++;
dev->stats.tx_errors++;
if(csr0 & CSR0_MISS) {
int i;
for(i=0;i<RMDNUM;i++)
printk("%02x ",p->rmdhead[i].u.s.status);
printk("\n");
p->stats.rx_errors++;
dev->stats.rx_errors++;
}
if(csr0 & CSR0_MERR) {
if(debuglevel > 1)
@ -997,12 +1003,12 @@ static void ni65_xmit_intr(struct net_device *dev,int csr0)
#endif
/* checking some errors */
if(tmdp->status2 & XMIT_RTRY)
p->stats.tx_aborted_errors++;
dev->stats.tx_aborted_errors++;
if(tmdp->status2 & XMIT_LCAR)
p->stats.tx_carrier_errors++;
dev->stats.tx_carrier_errors++;
if(tmdp->status2 & (XMIT_BUFF | XMIT_UFLO )) {
/* this stops the xmitter */
p->stats.tx_fifo_errors++;
dev->stats.tx_fifo_errors++;
if(debuglevel > 0)
printk(KERN_ERR "%s: Xmit FIFO/BUFF error\n",dev->name);
if(p->features & INIT_RING_BEFORE_START) {
@ -1016,12 +1022,12 @@ static void ni65_xmit_intr(struct net_device *dev,int csr0)
if(debuglevel > 2)
printk(KERN_ERR "%s: xmit-error: %04x %02x-%04x\n",dev->name,csr0,(int) tmdstat,(int) tmdp->status2);
if(!(csr0 & CSR0_BABL)) /* don't count errors twice */
p->stats.tx_errors++;
dev->stats.tx_errors++;
tmdp->status2 = 0;
}
else {
p->stats.tx_bytes -= (short)(tmdp->blen);
p->stats.tx_packets++;
dev->stats.tx_bytes -= (short)(tmdp->blen);
dev->stats.tx_packets++;
}
#ifdef XMT_VIA_SKB
@ -1057,7 +1063,7 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
if(!(rmdstat & RCV_ERR)) {
if(rmdstat & RCV_START)
{
p->stats.rx_length_errors++;
dev->stats.rx_length_errors++;
printk(KERN_ERR "%s: recv, packet too long: %d\n",dev->name,rmdp->mlen & 0x0fff);
}
}
@ -1066,16 +1072,16 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
printk(KERN_ERR "%s: receive-error: %04x, lance-status: %04x/%04x\n",
dev->name,(int) rmdstat,csr0,(int) inw(PORT+L_DATAREG) );
if(rmdstat & RCV_FRAM)
p->stats.rx_frame_errors++;
dev->stats.rx_frame_errors++;
if(rmdstat & RCV_OFLO)
p->stats.rx_over_errors++;
dev->stats.rx_over_errors++;
if(rmdstat & RCV_CRC)
p->stats.rx_crc_errors++;
dev->stats.rx_crc_errors++;
if(rmdstat & RCV_BUF_ERR)
p->stats.rx_fifo_errors++;
dev->stats.rx_fifo_errors++;
}
if(!(csr0 & CSR0_MISS)) /* don't count errors twice */
p->stats.rx_errors++;
dev->stats.rx_errors++;
}
else if( (len = (rmdp->mlen & 0x0fff) - 4) >= 60)
{
@ -1106,20 +1112,20 @@ static void ni65_recv_intr(struct net_device *dev,int csr0)
skb_put(skb,len);
skb_copy_to_linear_data(skb, (unsigned char *) p->recvbounce[p->rmdnum],len);
#endif
p->stats.rx_packets++;
p->stats.rx_bytes += len;
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
skb->protocol=eth_type_trans(skb,dev);
netif_rx(skb);
}
else
{
printk(KERN_ERR "%s: can't alloc new sk_buff\n",dev->name);
p->stats.rx_dropped++;
dev->stats.rx_dropped++;
}
}
else {
printk(KERN_INFO "%s: received runt packet\n",dev->name);
p->stats.rx_errors++;
dev->stats.rx_errors++;
}
rmdp->blen = -(R_BUF_SIZE-8);
rmdp->mlen = 0;
@ -1213,23 +1219,6 @@ static int ni65_send_packet(struct sk_buff *skb, struct net_device *dev)
return 0;
}
static struct net_device_stats *ni65_get_stats(struct net_device *dev)
{
#if 0
int i;
struct priv *p = dev->ml_priv;
for(i=0;i<RMDNUM;i++)
{
struct rmd *rmdp = p->rmdhead + ((p->rmdnum + i) & (RMDNUM-1));
printk("%02x ",rmdp->u.s.status);
}
printk("\n");
#endif
return &((struct priv *)dev->ml_priv)->stats;
}
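
Dropping ni65_get_stats() works because struct net_device already embeds a stats block, and the core falls back to it when a driver registers no get_stats hook; keeping a second copy in struct priv plus a one-line accessor was pure duplication. Roughly the shape of the core fallback in this era (a simplified sketch, not the verbatim kernel source):

/* net/core/dev.c, schematically */
const struct net_device_stats *dev_get_stats(struct net_device *dev)
{
    const struct net_device_ops *ops = dev->netdev_ops;

    if (ops->ndo_get_stats)
        return ops->ndo_get_stats(dev);
    return &dev->stats;     /* the counters ni65 now bumps directly */
}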
static void set_multicast_list(struct net_device *dev)
{
if(!ni65_lance_reinit(dev))

View file

@ -143,6 +143,17 @@ out:
return ERR_PTR(err);
}
static const struct net_device_ops seeq8005_netdev_ops = {
.ndo_open = seeq8005_open,
.ndo_stop = seeq8005_close,
.ndo_start_xmit = seeq8005_send_packet,
.ndo_tx_timeout = seeq8005_timeout,
.ndo_set_multicast_list = set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
/* This is the real probe routine. Linux has a history of friendly device
probes on the ISA bus. A good device probe avoids doing writes, and
verifies that the correct device exists and functions. */
@ -332,12 +343,8 @@ static int __init seeq8005_probe1(struct net_device *dev, int ioaddr)
}
}
#endif
dev->open = seeq8005_open;
dev->stop = seeq8005_close;
dev->hard_start_xmit = seeq8005_send_packet;
dev->tx_timeout = seeq8005_timeout;
dev->netdev_ops = &seeq8005_netdev_ops;
dev->watchdog_timeo = HZ/20;
dev->set_multicast_list = set_multicast_list;
dev->flags &= ~IFF_MULTICAST;
return 0;

View file

@ -142,9 +142,6 @@ static int __init do_ultra_probe(struct net_device *dev)
int base_addr = dev->base_addr;
int irq = dev->irq;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = &ultra_poll;
#endif
if (base_addr > 0x1ff) /* Check a single specified location. */
return ultra_probe1(dev, base_addr);
else if (base_addr != 0) /* Don't probe at all. */
@ -199,7 +196,7 @@ static const struct net_device_ops ultra_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,
.ndo_poll_controller = ultra_poll,
#endif
};

View file

@ -153,6 +153,22 @@ out:
return ERR_PTR(err);
}
static const struct net_device_ops ultra32_netdev_ops = {
.ndo_open = ultra32_open,
.ndo_stop = ultra32_close,
.ndo_start_xmit = ei_start_xmit,
.ndo_tx_timeout = ei_tx_timeout,
.ndo_get_stats = ei_get_stats,
.ndo_set_multicast_list = ei_set_multicast_list,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_change_mtu = eth_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = ei_poll,
#endif
};
static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
{
int i, edge, media, retval;
@ -273,11 +289,8 @@ static int __init ultra32_probe1(struct net_device *dev, int ioaddr)
ei_status.block_output = &ultra32_block_output;
ei_status.get_8390_hdr = &ultra32_get_8390_hdr;
ei_status.reset_8390 = &ultra32_reset_8390;
dev->open = &ultra32_open;
dev->stop = &ultra32_close;
#ifdef CONFIG_NET_POLL_CONTROLLER
dev->poll_controller = ei_poll;
#endif
dev->netdev_ops = &ultra32_netdev_ops;
NS8390_init(dev, 0);
return 0;

View file

@ -831,6 +831,17 @@ static int __init smc_findirq(int ioaddr)
#endif
}
static const struct net_device_ops smc_netdev_ops = {
.ndo_open = smc_open,
.ndo_stop = smc_close,
.ndo_start_xmit = smc_wait_to_send_packet,
.ndo_tx_timeout = smc_timeout,
.ndo_set_multicast_list = smc_set_multicast_list,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
/*----------------------------------------------------------------------
. Function: smc_probe( int ioaddr )
.
@ -1044,12 +1055,8 @@ static int __init smc_probe(struct net_device *dev, int ioaddr)
goto err_out;
}
dev->open = smc_open;
dev->stop = smc_close;
dev->hard_start_xmit = smc_wait_to_send_packet;
dev->tx_timeout = smc_timeout;
dev->netdev_ops = &smc_netdev_ops;
dev->watchdog_timeo = HZ/20;
dev->set_multicast_list = smc_set_multicast_list;
return 0;

View file

@ -1680,6 +1680,7 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
u8 address, u8 data)
{
u32 op = E2P_CMD_EPC_CMD_ERASE_ | address;
u32 temp;
int ret;
SMSC_TRACE(DRV, "address 0x%x, data 0x%x", address, data);
@ -1688,6 +1689,10 @@ static int smsc911x_eeprom_write_location(struct smsc911x_data *pdata,
if (!ret) {
op = E2P_CMD_EPC_CMD_WRITE_ | address;
smsc911x_reg_write(pdata, E2P_DATA, (u32)data);
/* Workaround for hardware read-after-write restriction */
temp = smsc911x_reg_read(pdata, BYTE_TEST);
ret = smsc911x_eeprom_send_cmd(pdata, op);
}
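
The added BYTE_TEST read is a dummy read: it separates the E2P_DATA write from the EEPROM WRITE command that follows, satisfying the controller's read-after-write restriction noted in the comment. BYTE_TEST is a natural choice of spacer since, on this family of parts, it is a read-only register returning a fixed test pattern, so the extra access has no side effects. The idiom in isolation (schematic, using the driver's own accessors; only the middle line is new):

op = E2P_CMD_EPC_CMD_WRITE_ | address;
smsc911x_reg_write(pdata, E2P_DATA, (u32)data);  /* stage the byte */
temp = smsc911x_reg_read(pdata, BYTE_TEST);      /* dummy read: spacing */
ret = smsc911x_eeprom_send_cmd(pdata, op);       /* now issue WRITE */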

View file

@ -142,7 +142,7 @@ static void madgemc_sifwritew(struct net_device *dev, unsigned short val, unsign
return;
}
static struct net_device_ops madgemc_netdev_ops __read_mostly;
static int __devinit madgemc_probe(struct device *device)
{
@ -168,7 +168,7 @@ static int __devinit madgemc_probe(struct device *device)
goto getout;
}
dev->dma = 0;
dev->netdev_ops = &madgemc_netdev_ops;
card = kmalloc(sizeof(struct card_info), GFP_KERNEL);
if (card==NULL) {
@ -348,9 +348,6 @@ static int __devinit madgemc_probe(struct device *device)
memcpy(tp->ProductID, "Madge MCA 16/4 ", PROD_ID_SIZE + 1);
dev->open = madgemc_open;
dev->stop = madgemc_close;
tp->tmspriv = card;
dev_set_drvdata(device, dev);
@ -758,6 +755,10 @@ static struct mca_driver madgemc_driver = {
static int __init madgemc_init (void)
{
madgemc_netdev_ops = tms380tr_netdev_ops;
madgemc_netdev_ops.ndo_open = madgemc_open;
madgemc_netdev_ops.ndo_stop = madgemc_close;
return mca_register_driver (&madgemc_driver);
}
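
madgemc (and proteon, just below) wants the stock tms380tr operations with only open/stop overridden. Since several drivers share tms380tr_netdev_ops, the patch keeps that table generic and gives each wrapper driver a writable __read_mostly copy, patched once at module init before any device is registered. Condensed shape of the trick (schematic kernel C):

/* module-local, writable copy of the shared ops */
static struct net_device_ops madgemc_netdev_ops __read_mostly;

static int __init madgemc_init(void)
{
    /* inherit everything generic... */
    madgemc_netdev_ops = tms380tr_netdev_ops;
    /* ...then override just the two hooks this board needs */
    madgemc_netdev_ops.ndo_open = madgemc_open;
    madgemc_netdev_ops.ndo_stop = madgemc_close;

    return mca_register_driver(&madgemc_driver);
}

The alternative, a second fully spelled-out const table per wrapper driver, would duplicate the generic entries in several places; copying and overriding keeps the defaults in one spot at the cost of a writable structure.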

View file

@ -116,6 +116,8 @@ nodev:
return -ENODEV;
}
static struct net_device_ops proteon_netdev_ops __read_mostly;
static int __init setup_card(struct net_device *dev, struct device *pdev)
{
struct net_local *tp;
@ -167,8 +169,7 @@ static int __init setup_card(struct net_device *dev, struct device *pdev)
tp->tmspriv = NULL;
dev->open = proteon_open;
dev->stop = tms380tr_close;
dev->netdev_ops = &proteon_netdev_ops;
if (dev->irq == 0)
{
@ -352,6 +353,10 @@ static int __init proteon_init(void)
struct platform_device *pdev;
int i, num = 0, err = 0;
proteon_netdev_ops = tms380tr_netdev_ops;
proteon_netdev_ops.ndo_open = proteon_open;
proteon_netdev_ops.ndo_stop = tms380tr_close;
err = platform_driver_register(&proteon_driver);
if (err)
return err;

Some files were not shown because too many files changed in this diff.