2005-04-17 02:20:36 +04:00
|
|
|
/*
|
|
|
|
* Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
|
|
|
|
* Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
|
|
|
|
*
|
2005-09-12 05:18:10 +04:00
|
|
|
* Right now, I am very wasteful with the buffers. I allocate memory
|
2005-04-17 02:20:36 +04:00
|
|
|
* pages and then divide them into 2K frame buffers. This way I know I
|
|
|
|
* have buffers large enough to hold one frame within one buffer descriptor.
|
|
|
|
* Once I get this working, I will use 64 or 128 byte CPM buffers, which
|
|
|
|
* will be much more memory efficient and will easily handle lots of
|
|
|
|
* small packets.
|
|
|
|
*
|
|
|
|
* Much better multiple PHY support by Magnus Damm.
|
|
|
|
* Copyright (c) 2000 Ericsson Radio Systems AB.
|
|
|
|
*
|
2005-11-07 07:09:50 +03:00
|
|
|
* Support for FEC controller of ColdFire processors.
|
|
|
|
* Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
|
2005-09-12 05:18:10 +04:00
|
|
|
*
|
|
|
|
* Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
|
2006-06-27 07:05:33 +04:00
|
|
|
* Copyright (c) 2004-2006 Macq Electronique SA.
|
2005-04-17 02:20:36 +04:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/string.h>
|
|
|
|
#include <linux/ptrace.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/ioport.h>
|
|
|
|
#include <linux/slab.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/pci.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/spinlock.h>
|
|
|
|
#include <linux/workqueue.h>
|
|
|
|
#include <linux/bitops.h>
|
2009-01-29 02:03:05 +03:00
|
|
|
#include <linux/io.h>
|
|
|
|
#include <linux/irq.h>
|
2009-01-29 02:03:10 +03:00
|
|
|
#include <linux/clk.h>
|
2009-01-29 02:03:11 +03:00
|
|
|
#include <linux/platform_device.h>
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2007-07-30 10:28:46 +04:00
|
|
|
#include <asm/cacheflush.h>
|
2009-01-29 02:03:10 +03:00
|
|
|
|
|
|
|
#ifndef CONFIG_ARCH_MXC
|
2005-04-17 02:20:36 +04:00
|
|
|
#include <asm/coldfire.h>
|
|
|
|
#include <asm/mcfsim.h>
|
2009-01-29 02:03:10 +03:00
|
|
|
#endif
|
2009-01-29 02:03:05 +03:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
#include "fec.h"
|
|
|
|
|
2009-01-29 02:03:10 +03:00
|
|
|
#ifdef CONFIG_ARCH_MXC
|
|
|
|
#include <mach/hardware.h>
|
|
|
|
#define FEC_ALIGNMENT 0xf
|
|
|
|
#else
|
|
|
|
#define FEC_ALIGNMENT 0x3
|
|
|
|
#endif
|
|
|
|
|
2009-01-29 02:03:11 +03:00
|
|
|
/*
|
|
|
|
* Define the fixed address of the FEC hardware.
|
|
|
|
*/
|
2008-06-06 09:55:36 +04:00
|
|
|
#if defined(CONFIG_M5272)
|
2008-05-01 08:04:02 +04:00
|
|
|
#define HAVE_mii_link_interrupt
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
static unsigned char fec_mac_default[] = {
|
|
|
|
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
|
|
|
|
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Some hardware gets it MAC address out of local flash memory.
|
|
|
|
* if this is non-zero then assume it is the address to get MAC from.
|
|
|
|
*/
|
|
|
|
#if defined(CONFIG_NETtel)
|
|
|
|
#define FEC_FLASHMAC 0xf0006006
|
|
|
|
#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
|
|
|
|
#define FEC_FLASHMAC 0xf0006000
|
|
|
|
#elif defined(CONFIG_CANCam)
|
|
|
|
#define FEC_FLASHMAC 0xf0020000
|
2005-09-12 05:18:10 +04:00
|
|
|
#elif defined (CONFIG_M5272C3)
|
|
|
|
#define FEC_FLASHMAC (0xffe04000 + 4)
|
|
|
|
#elif defined(CONFIG_MOD5272)
|
|
|
|
#define FEC_FLASHMAC 0xffc0406b
|
2005-04-17 02:20:36 +04:00
|
|
|
#else
|
|
|
|
#define FEC_FLASHMAC 0
|
|
|
|
#endif
|
2009-02-27 09:42:51 +03:00
|
|
|
#endif /* CONFIG_M5272 */
|
2009-01-29 02:03:11 +03:00
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* Forward declarations of some structures to support different PHYs */
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/* One entry of a PHY command script: a raw MII frame to issue plus an
 * optional callback invoked with the MII read result when the transfer
 * completes.  Scripts are terminated by an entry with mii_data == mk_mii_end.
 */
typedef struct {
	uint mii_data;					/* MII frame, built with mk_mii_read()/mk_mii_write() */
	void (*funct)(uint mii_reg, struct net_device *dev); /* completion callback, may be NULL */
} phy_cmd_t;
|
|
|
|
|
|
|
|
/* Static description of one supported PHY type: its hardware ID, a
 * human-readable name, and the four command scripts used to drive it
 * through its life cycle (configure, start, acknowledge interrupt,
 * shut down).
 */
typedef struct {
	uint id;			/* PHY identifier from MII registers 2/3 */
	char *name;			/* printable PHY name */

	const phy_cmd_t *config;	/* script: initial configuration */
	const phy_cmd_t *startup;	/* script: bring up / start auto-negotiation */
	const phy_cmd_t *ack_int;	/* script: acknowledge a PHY interrupt */
	const phy_cmd_t *shutdown;	/* script: power down / disable */
} phy_info_t;
|
|
|
|
|
|
|
|
/* The number of Tx and Rx buffers. These are allocated from the page
|
|
|
|
* pool. The code may assume these are power of two, so it it best
|
|
|
|
* to keep them that size.
|
|
|
|
* We don't need to allocate pages for the transmitter. We just use
|
|
|
|
* the skbuffer directly.
|
|
|
|
*/
|
|
|
|
#define FEC_ENET_RX_PAGES 8
|
|
|
|
#define FEC_ENET_RX_FRSIZE 2048
|
|
|
|
#define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE)
|
|
|
|
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
|
|
|
|
#define FEC_ENET_TX_FRSIZE 2048
|
|
|
|
#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
|
|
|
|
#define TX_RING_SIZE 16 /* Must be power of two */
|
|
|
|
#define TX_RING_MOD_MASK 15 /* for this to work */
|
|
|
|
|
2005-11-07 07:09:50 +03:00
|
|
|
#if (((RX_RING_SIZE + TX_RING_SIZE) * 8) > PAGE_SIZE)
|
2006-06-27 07:10:56 +04:00
|
|
|
#error "FEC: descriptor ring size constants too large"
|
2005-11-07 07:09:50 +03:00
|
|
|
#endif
|
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* Interrupt events/masks. */
|
2005-04-17 02:20:36 +04:00
|
|
|
#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
|
|
|
|
#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
|
|
|
|
#define FEC_ENET_BABT ((uint)0x20000000) /* Babbling transmitter */
|
|
|
|
#define FEC_ENET_GRA ((uint)0x10000000) /* Graceful stop complete */
|
|
|
|
#define FEC_ENET_TXF ((uint)0x08000000) /* Full frame transmitted */
|
|
|
|
#define FEC_ENET_TXB ((uint)0x04000000) /* A buffer was transmitted */
|
|
|
|
#define FEC_ENET_RXF ((uint)0x02000000) /* Full frame received */
|
|
|
|
#define FEC_ENET_RXB ((uint)0x01000000) /* A buffer was received */
|
|
|
|
#define FEC_ENET_MII ((uint)0x00800000) /* MII interrupt */
|
|
|
|
#define FEC_ENET_EBERR ((uint)0x00400000) /* SDMA bus error */
|
|
|
|
|
|
|
|
/* The FEC stores dest/src/type, data, and checksum for receive packets.
|
|
|
|
*/
|
|
|
|
#define PKT_MAXBUF_SIZE 1518
|
|
|
|
#define PKT_MINBUF_SIZE 64
|
|
|
|
#define PKT_MAXBLR_SIZE 1520
|
|
|
|
|
|
|
|
|
|
|
|
/*
|
2006-06-27 07:10:56 +04:00
|
|
|
* The 5270/5271/5280/5282/532x RX control register also contains maximum frame
|
2005-04-17 02:20:36 +04:00
|
|
|
* size bits. Other FEC hardware does not, so we need to take that into
|
|
|
|
* account when setting it.
|
|
|
|
*/
|
2005-11-07 07:09:50 +03:00
|
|
|
#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
|
2009-01-29 02:03:10 +03:00
|
|
|
defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC)
|
2005-04-17 02:20:36 +04:00
|
|
|
#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
|
|
|
|
#else
|
|
|
|
#define OPT_FRAME_SIZE 0
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* The FEC buffer descriptors track the ring buffers. The rx_bd_base and
|
|
|
|
* tx_bd_base always point to the base of the buffer descriptors. The
|
|
|
|
* cur_rx and cur_tx point to the currently available buffer.
|
|
|
|
* The dirty_tx tracks the current buffer that is being sent by the
|
|
|
|
* controller. The cur_tx and dirty_tx are equal under both completely
|
|
|
|
* empty and completely full conditions. The empty/ready indicator in
|
|
|
|
* the buffer descriptor determines the actual condition.
|
|
|
|
*/
|
|
|
|
/* Per-device driver state for one FEC controller instance. */
struct fec_enet_private {
	/* Hardware registers of the FEC device */
	void __iomem *hwp;		/* ioremapped base of the FEC register block */

	struct net_device *netdev;	/* back-pointer to the owning net_device */

	struct clk *clk;		/* FEC peripheral clock (platform builds) */

	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
	unsigned char *tx_bounce[TX_RING_SIZE];	/* aligned copy buffers for unaligned skbs */
	struct sk_buff* tx_skbuff[TX_RING_SIZE];
	struct sk_buff* rx_skbuff[RX_RING_SIZE];
	ushort	skb_cur;		/* next free slot in tx_skbuff[] */
	ushort	skb_dirty;		/* oldest in-flight slot in tx_skbuff[] */

	/* CPM dual port RAM relative addresses */
	dma_addr_t bd_dma;		/* DMA handle of the descriptor ring area */
	/* Address of Rx and Tx buffers */
	struct bufdesc	*rx_bd_base;
	struct bufdesc	*tx_bd_base;
	/* The next free ring entry */
	struct bufdesc	*cur_rx, *cur_tx;
	/* The ring entries to be free()ed */
	struct bufdesc	*dirty_tx;

	uint	tx_full;		/* nonzero when the tx ring has no free entry */
	/* hold while accessing the HW like ringbuffer for tx/rx but not MAC */
	spinlock_t hw_lock;
	/* hold while accessing the mii_list_t() elements */
	spinlock_t mii_lock;

	uint	phy_id;			/* PHY identifier read from MII regs 2/3 */
	uint	phy_id_done;		/* nonzero once PHY probing has finished */
	uint	phy_status;		/* cached PHY_CONF_*/PHY_STAT_* bits */
	uint	phy_speed;		/* MII clock speed divider value */
	phy_info_t const	*phy;	/* matched entry in the PHY description table */
	struct work_struct phy_task;	/* deferred PHY state machine work */

	uint	sequence_done;		/* nonzero when the PHY config script has run */
	uint	mii_phy_task_queued;	/* guards against double-queueing phy_task */

	uint	phy_addr;		/* MII bus address of the PHY */

	int	index;			/* controller index (multi-FEC systems) */
	int	opened;			/* nonzero between open() and close() */
	int	link;			/* current link state: 1 up, 0 down */
	int	old_link;		/* previous link state, for change detection */
	int	full_duplex;		/* current duplex mode */
};
|
|
|
|
|
|
|
|
static void fec_enet_mii(struct net_device *dev);
|
IRQ: Maintain regs pointer globally rather than passing to IRQ handlers
Maintain a per-CPU global "struct pt_regs *" variable which can be used instead
of passing regs around manually through all ~1800 interrupt handlers in the
Linux kernel.
The regs pointer is used in few places, but it potentially costs both stack
space and code to pass it around. On the FRV arch, removing the regs parameter
from all the genirq function results in a 20% speed up of the IRQ exit path
(ie: from leaving timer_interrupt() to leaving do_IRQ()).
Where appropriate, an arch may override the generic storage facility and do
something different with the variable. On FRV, for instance, the address is
maintained in GR28 at all times inside the kernel as part of general exception
handling.
Having looked over the code, it appears that the parameter may be handed down
through up to twenty or so layers of functions. Consider a USB character
device attached to a USB hub, attached to a USB controller that posts its
interrupts through a cascaded auxiliary interrupt controller. A character
device driver may want to pass regs to the sysrq handler through the input
layer which adds another few layers of parameter passing.
I've build this code with allyesconfig for x86_64 and i386. I've runtested the
main part of the code on FRV and i386, though I can't test most of the drivers.
I've also done partial conversion for powerpc and MIPS - these at least compile
with minimal configurations.
This will affect all archs. Mostly the changes should be relatively easy.
Take do_IRQ(), store the regs pointer at the beginning, saving the old one:
struct pt_regs *old_regs = set_irq_regs(regs);
And put the old one back at the end:
set_irq_regs(old_regs);
Don't pass regs through to generic_handle_irq() or __do_IRQ().
In timer_interrupt(), this sort of change will be necessary:
- update_process_times(user_mode(regs));
- profile_tick(CPU_PROFILING, regs);
+ update_process_times(user_mode(get_irq_regs()));
+ profile_tick(CPU_PROFILING);
I'd like to move update_process_times()'s use of get_irq_regs() into itself,
except that i386, alone of the archs, uses something other than user_mode().
Some notes on the interrupt handling in the drivers:
(*) input_dev() is now gone entirely. The regs pointer is no longer stored in
the input_dev struct.
(*) finish_unlinks() in drivers/usb/host/ohci-q.c needs checking. It does
something different depending on whether it's been supplied with a regs
pointer or not.
(*) Various IRQ handler function pointers have been moved to type
irq_handler_t.
Signed-Off-By: David Howells <dhowells@redhat.com>
(cherry picked from 1b16e7ac850969f38b375e511e3fa2f474a33867 commit)
2006-10-05 17:55:46 +04:00
|
|
|
static irqreturn_t fec_enet_interrupt(int irq, void * dev_id);
|
2005-04-17 02:20:36 +04:00
|
|
|
static void fec_enet_tx(struct net_device *dev);
|
|
|
|
static void fec_enet_rx(struct net_device *dev);
|
|
|
|
static int fec_enet_close(struct net_device *dev);
|
|
|
|
static void fec_restart(struct net_device *dev, int duplex);
|
|
|
|
static void fec_stop(struct net_device *dev);
|
|
|
|
|
|
|
|
|
|
|
|
/* MII processing. We keep this as simple as possible. Requests are
|
|
|
|
* placed on the list (if there is room). When the request is finished
|
|
|
|
* by the MII, an optional function may be called.
|
|
|
|
*/
|
|
|
|
/* One pending MII transaction.  Requests are queued on a singly linked
 * list (mii_head/mii_tail) and completed one at a time from the MII
 * interrupt; mii_func, if set, receives the read-back register value.
 */
typedef struct mii_list {
	uint	mii_regval;		/* MII frame to write to FEC_MII_DATA */
	void	(*mii_func)(uint val, struct net_device *dev); /* completion hook, may be NULL */
	struct	mii_list *mii_next;	/* next pending request */
} mii_list_t;
|
|
|
|
|
|
|
|
#define NMII 20
|
2005-09-12 05:18:10 +04:00
|
|
|
static mii_list_t mii_cmds[NMII];
|
|
|
|
static mii_list_t *mii_free;
|
|
|
|
static mii_list_t *mii_head;
|
|
|
|
static mii_list_t *mii_tail;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2006-09-13 21:24:59 +04:00
|
|
|
static int mii_queue(struct net_device *dev, int request,
|
2005-04-17 02:20:36 +04:00
|
|
|
void (*func)(uint, struct net_device *));
|
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* Make MII read/write commands for the FEC */
|
2005-04-17 02:20:36 +04:00
|
|
|
#define mk_mii_read(REG) (0x60020000 | ((REG & 0x1f) << 18))
|
|
|
|
#define mk_mii_write(REG, VAL) (0x50020000 | ((REG & 0x1f) << 18) | \
|
|
|
|
(VAL & 0xffff))
|
|
|
|
#define mk_mii_end 0
|
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* Transmitter timeout */
|
|
|
|
#define TX_TIMEOUT (2 * HZ)
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* Register definitions for the PHY */
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
#define MII_REG_CR 0 /* Control Register */
|
|
|
|
#define MII_REG_SR 1 /* Status Register */
|
|
|
|
#define MII_REG_PHYIR1 2 /* PHY Identification Register 1 */
|
|
|
|
#define MII_REG_PHYIR2 3 /* PHY Identification Register 2 */
|
2006-09-13 21:24:59 +04:00
|
|
|
#define MII_REG_ANAR 4 /* A-N Advertisement Register */
|
2005-04-17 02:20:36 +04:00
|
|
|
#define MII_REG_ANLPAR 5 /* A-N Link Partner Ability Register */
|
|
|
|
#define MII_REG_ANER 6 /* A-N Expansion Register */
|
|
|
|
#define MII_REG_ANNPTR 7 /* A-N Next Page Transmit Register */
|
|
|
|
#define MII_REG_ANLPRNPR 8 /* A-N Link Partner Received Next Page Reg. */
|
|
|
|
|
|
|
|
/* values for phy_status */
|
|
|
|
|
|
|
|
#define PHY_CONF_ANE 0x0001 /* 1 auto-negotiation enabled */
|
|
|
|
#define PHY_CONF_LOOP 0x0002 /* 1 loopback mode enabled */
|
|
|
|
#define PHY_CONF_SPMASK 0x00f0 /* mask for speed */
|
|
|
|
#define PHY_CONF_10HDX 0x0010 /* 10 Mbit half duplex supported */
|
2006-09-13 21:24:59 +04:00
|
|
|
#define PHY_CONF_10FDX 0x0020 /* 10 Mbit full duplex supported */
|
2005-04-17 02:20:36 +04:00
|
|
|
#define PHY_CONF_100HDX 0x0040 /* 100 Mbit half duplex supported */
|
2006-09-13 21:24:59 +04:00
|
|
|
#define PHY_CONF_100FDX 0x0080 /* 100 Mbit full duplex supported */
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
#define PHY_STAT_LINK 0x0100 /* 1 up - 0 down */
|
|
|
|
#define PHY_STAT_FAULT 0x0200 /* 1 remote fault */
|
|
|
|
#define PHY_STAT_ANC 0x0400 /* 1 auto-negotiation complete */
|
|
|
|
#define PHY_STAT_SPMASK 0xf000 /* mask for speed */
|
|
|
|
#define PHY_STAT_10HDX 0x1000 /* 10 Mbit half duplex selected */
|
2006-09-13 21:24:59 +04:00
|
|
|
#define PHY_STAT_10FDX 0x2000 /* 10 Mbit full duplex selected */
|
2005-04-17 02:20:36 +04:00
|
|
|
#define PHY_STAT_100HDX 0x4000 /* 100 Mbit half duplex selected */
|
2006-09-13 21:24:59 +04:00
|
|
|
#define PHY_STAT_100FDX 0x8000 /* 100 Mbit full duplex selected */
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
|
|
|
|
/* Queue one skb for transmission.
 *
 * Takes hw_lock with IRQs disabled, claims the next tx buffer descriptor,
 * bounces the data into an aligned buffer if the controller requires it,
 * maps the buffer for DMA and kicks the transmitter.  Returns NETDEV_TX_OK
 * on success, NETDEV_TX_BUSY if the link is down or the ring is full.
 */
static int
fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *bdp;
	void *bufaddr;
	unsigned short	status;
	unsigned long flags;

	if (!fep->link) {
		/* Link is down or autonegotiation is in progress. */
		return NETDEV_TX_BUSY;
	}

	spin_lock_irqsave(&fep->hw_lock, flags);
	/* Fill in a Tx ring entry */
	bdp = fep->cur_tx;

	status = bdp->cbd_sc;

	if (status & BD_ENET_TX_READY) {
		/* Ooops.  All transmit buffers are full.  Bail out.
		 * This should not happen, since dev->tbusy should be set.
		 */
		printk("%s: tx queue full!.\n", dev->name);
		spin_unlock_irqrestore(&fep->hw_lock, flags);
		return NETDEV_TX_BUSY;
	}

	/* Clear all of the status flags */
	status &= ~BD_ENET_TX_STATS;

	/* Set buffer length and buffer pointer */
	bufaddr = skb->data;
	bdp->cbd_datlen = skb->len;

	/*
	 * On some FEC implementations data must be aligned on
	 * 4-byte boundaries. Use bounce buffers to copy data
	 * and get it aligned. Ugh.
	 */
	if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
		unsigned int index;
		index = bdp - fep->tx_bd_base;
		/* tx_bounce[] slots are FEC_ENET_TX_FRSIZE bytes, so the
		 * whole frame always fits. */
		memcpy(fep->tx_bounce[index], (void *)skb->data, skb->len);
		bufaddr = fep->tx_bounce[index];
	}

	/* Save skb pointer so the completion IRQ (fec_enet_tx) can free it */
	fep->tx_skbuff[fep->skb_cur] = skb;

	dev->stats.tx_bytes += skb->len;
	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;

	/* Push the data cache so the CPM does not get stale memory
	 * data.
	 * NOTE(review): maps FEC_ENET_TX_FRSIZE bytes rather than skb->len;
	 * for a non-bounced skb this maps past the packet data — verify
	 * this is safe on the supported platforms.
	 */
	bdp->cbd_bufaddr = dma_map_single(&dev->dev, bufaddr,
			FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);

	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
	 * it's the last BD of the frame, and to put the CRC on the end.
	 */
	status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
	bdp->cbd_sc = status;

	dev->trans_start = jiffies;

	/* Trigger transmission start */
	writel(0, fep->hwp + FEC_X_DES_ACTIVE);

	/* If this was the last BD in the ring, start at the beginning again. */
	if (status & BD_ENET_TX_WRAP)
		bdp = fep->tx_bd_base;
	else
		bdp++;

	/* Ring is full when the next free entry catches up with the
	 * oldest unreclaimed one; stop the queue until tx completion. */
	if (bdp == fep->dirty_tx) {
		fep->tx_full = 1;
		netif_stop_queue(dev);
	}

	fep->cur_tx = bdp;

	spin_unlock_irqrestore(&fep->hw_lock, flags);

	return NETDEV_TX_OK;
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
fec_timeout(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct fec_enet_private *fep = netdev_priv(dev);
|
|
|
|
|
2007-10-04 04:41:50 +04:00
|
|
|
dev->stats.tx_errors++;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2005-09-12 05:18:10 +04:00
|
|
|
fec_restart(dev, fep->full_duplex);
|
2005-04-17 02:20:36 +04:00
|
|
|
netif_wake_queue(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Top-level FEC interrupt handler.
 *
 * Reads and immediately acknowledges the event register, then dispatches
 * to the rx, tx and MII service routines.  Loops until no further events
 * are pending so that events arriving while one is being serviced are not
 * lost.  Returns IRQ_HANDLED if any recognized event was serviced,
 * IRQ_NONE otherwise (shared-IRQ friendly).
 */
static irqreturn_t
fec_enet_interrupt(int irq, void * dev_id)
{
	struct	net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);
	uint	int_events;
	irqreturn_t ret = IRQ_NONE;

	do {
		/* Read pending events and ack them by writing them back */
		int_events = readl(fep->hwp + FEC_IEVENT);
		writel(int_events, fep->hwp + FEC_IEVENT);

		if (int_events & FEC_ENET_RXF) {
			ret = IRQ_HANDLED;
			fec_enet_rx(dev);
		}

		/* Transmit OK, or non-fatal error. Update the buffer
		 * descriptors. FEC handles all errors, we just discover
		 * them as part of the transmit process.
		 */
		if (int_events & FEC_ENET_TXF) {
			ret = IRQ_HANDLED;
			fec_enet_tx(dev);
		}

		if (int_events & FEC_ENET_MII) {
			ret = IRQ_HANDLED;
			fec_enet_mii(dev);
		}

	} while (int_events);

	return ret;
}
|
|
|
|
|
|
|
|
|
|
|
|
/* Reclaim completed transmit descriptors.
 *
 * Called from the interrupt handler on a TXF event.  Walks the ring from
 * dirty_tx, unmapping each finished buffer, accounting errors, freeing the
 * associated skb and waking the queue if the ring had filled up.  Stops at
 * the first descriptor still owned by the hardware (BD_ENET_TX_READY set),
 * or when it catches up with cur_tx on a non-full ring.
 */
static void
fec_enet_tx(struct net_device *dev)
{
	struct	fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;

	fep = netdev_priv(dev);
	spin_lock(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		/* Ring empty: reclaim pointer has caught the producer and
		 * the ring was not full, so there is nothing left to free. */
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		dma_unmap_single(&dev->dev, bdp->cbd_bufaddr, FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			dev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				dev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				dev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				dev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				dev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				dev->stats.tx_carrier_errors++;
		} else {
			dev->stats.tx_packets++;
		}

		/* Should be impossible: the loop condition already checked
		 * TX_READY was clear for this descriptor. */
		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			dev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(dev))
				netif_wake_queue(dev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock(&fep->hw_lock);
}
|
|
|
|
|
|
|
|
|
|
|
|
/* During a receive, the cur_rx points to the current incoming buffer.
|
|
|
|
* When we update through the ring, if the next incoming buffer has
|
|
|
|
* not been given to the system, we just set the empty indicator,
|
|
|
|
* effectively tossing the packet.
|
|
|
|
*/
|
|
|
|
static void
|
|
|
|
fec_enet_rx(struct net_device *dev)
|
|
|
|
{
|
2009-04-15 07:11:30 +04:00
|
|
|
struct fec_enet_private *fep = netdev_priv(dev);
|
2009-04-15 05:32:16 +04:00
|
|
|
struct bufdesc *bdp;
|
2006-06-27 07:19:33 +04:00
|
|
|
unsigned short status;
|
2005-04-17 02:20:36 +04:00
|
|
|
struct sk_buff *skb;
|
|
|
|
ushort pkt_len;
|
|
|
|
__u8 *data;
|
2006-09-13 21:24:59 +04:00
|
|
|
|
2006-06-27 07:19:33 +04:00
|
|
|
#ifdef CONFIG_M532x
|
|
|
|
flush_cache_all();
|
2006-09-13 21:24:59 +04:00
|
|
|
#endif
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-09-02 03:14:16 +04:00
|
|
|
spin_lock(&fep->hw_lock);
|
2008-05-01 08:08:12 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/* First, grab all of the stats for the incoming packet.
|
|
|
|
* These get messed up if we get called due to a busy condition.
|
|
|
|
*/
|
|
|
|
bdp = fep->cur_rx;
|
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* Since we have allocated space to hold a complete frame,
|
|
|
|
* the last indicator should be set.
|
|
|
|
*/
|
|
|
|
if ((status & BD_ENET_RX_LAST) == 0)
|
|
|
|
printk("FEC ENET: rcv is not +last\n");
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
if (!fep->opened)
|
|
|
|
goto rx_processing_done;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* Check for errors. */
|
|
|
|
if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
|
2005-04-17 02:20:36 +04:00
|
|
|
BD_ENET_RX_CR | BD_ENET_RX_OV)) {
|
2009-04-15 05:32:18 +04:00
|
|
|
dev->stats.rx_errors++;
|
|
|
|
if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
|
|
|
|
/* Frame too long or too short. */
|
|
|
|
dev->stats.rx_length_errors++;
|
|
|
|
}
|
|
|
|
if (status & BD_ENET_RX_NO) /* Frame alignment */
|
|
|
|
dev->stats.rx_frame_errors++;
|
|
|
|
if (status & BD_ENET_RX_CR) /* CRC Error */
|
|
|
|
dev->stats.rx_crc_errors++;
|
|
|
|
if (status & BD_ENET_RX_OV) /* FIFO overrun */
|
|
|
|
dev->stats.rx_fifo_errors++;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* Report late collisions as a frame error.
|
|
|
|
* On this error, the BD is closed, but we don't know what we
|
|
|
|
* have in the buffer. So, just drop this frame on the floor.
|
|
|
|
*/
|
|
|
|
if (status & BD_ENET_RX_CL) {
|
|
|
|
dev->stats.rx_errors++;
|
|
|
|
dev->stats.rx_frame_errors++;
|
|
|
|
goto rx_processing_done;
|
|
|
|
}
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* Process the incoming frame. */
|
|
|
|
dev->stats.rx_packets++;
|
|
|
|
pkt_len = bdp->cbd_datlen;
|
|
|
|
dev->stats.rx_bytes += pkt_len;
|
|
|
|
data = (__u8*)__va(bdp->cbd_bufaddr);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-04-15 05:32:24 +04:00
|
|
|
dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
|
|
|
|
DMA_FROM_DEVICE);
|
2009-01-29 02:03:09 +03:00
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* This does 16 byte alignment, exactly what we need.
|
|
|
|
* The packet length includes FCS, but we don't want to
|
|
|
|
* include that when passing upstream as it messes up
|
|
|
|
* bridging applications.
|
|
|
|
*/
|
2009-04-15 05:32:21 +04:00
|
|
|
skb = dev_alloc_skb(pkt_len - 4 + NET_IP_ALIGN);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-04-15 05:32:21 +04:00
|
|
|
if (unlikely(!skb)) {
|
2009-04-15 05:32:18 +04:00
|
|
|
printk("%s: Memory squeeze, dropping packet.\n",
|
|
|
|
dev->name);
|
|
|
|
dev->stats.rx_dropped++;
|
|
|
|
} else {
|
2009-04-15 05:32:21 +04:00
|
|
|
skb_reserve(skb, NET_IP_ALIGN);
|
2009-04-15 05:32:18 +04:00
|
|
|
skb_put(skb, pkt_len - 4); /* Make room */
|
|
|
|
skb_copy_to_linear_data(skb, data, pkt_len - 4);
|
|
|
|
skb->protocol = eth_type_trans(skb, dev);
|
|
|
|
netif_rx(skb);
|
|
|
|
}
|
2009-04-15 05:32:24 +04:00
|
|
|
|
|
|
|
bdp->cbd_bufaddr = dma_map_single(NULL, data, bdp->cbd_datlen,
|
|
|
|
DMA_FROM_DEVICE);
|
2009-04-15 05:32:18 +04:00
|
|
|
rx_processing_done:
|
|
|
|
/* Clear the status flags for this buffer */
|
|
|
|
status &= ~BD_ENET_RX_STATS;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* Mark the buffer empty */
|
|
|
|
status |= BD_ENET_RX_EMPTY;
|
|
|
|
bdp->cbd_sc = status;
|
2006-09-13 21:24:59 +04:00
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* Update BD pointer to next entry */
|
|
|
|
if (status & BD_ENET_RX_WRAP)
|
|
|
|
bdp = fep->rx_bd_base;
|
|
|
|
else
|
|
|
|
bdp++;
|
|
|
|
/* Doing this here will keep the FEC running while we process
|
|
|
|
* incoming frames. On a heavily loaded network, we should be
|
|
|
|
* able to keep up at the expense of system resources.
|
|
|
|
*/
|
|
|
|
writel(0, fep->hwp + FEC_R_DES_ACTIVE);
|
|
|
|
}
|
2009-04-15 05:32:16 +04:00
|
|
|
fep->cur_rx = bdp;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-09-02 03:14:16 +04:00
|
|
|
spin_unlock(&fep->hw_lock);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2006-06-27 07:19:33 +04:00
|
|
|
/* called from interrupt context */
|
2005-04-17 02:20:36 +04:00
|
|
|
static void
|
|
|
|
fec_enet_mii(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct fec_enet_private *fep;
|
|
|
|
mii_list_t *mip;
|
|
|
|
|
|
|
|
fep = netdev_priv(dev);
|
2009-09-02 03:14:16 +04:00
|
|
|
spin_lock(&fep->mii_lock);
|
2008-05-01 08:08:12 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
if ((mip = mii_head) == NULL) {
|
|
|
|
printk("MII and no head!\n");
|
2006-06-27 07:19:33 +04:00
|
|
|
goto unlock;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
if (mip->mii_func != NULL)
|
2009-04-15 07:11:30 +04:00
|
|
|
(*(mip->mii_func))(readl(fep->hwp + FEC_MII_DATA), dev);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
mii_head = mip->mii_next;
|
|
|
|
mip->mii_next = mii_free;
|
|
|
|
mii_free = mip;
|
|
|
|
|
|
|
|
if ((mip = mii_head) != NULL)
|
2009-04-15 07:11:30 +04:00
|
|
|
writel(mip->mii_regval, fep->hwp + FEC_MII_DATA);
|
2006-06-27 07:19:33 +04:00
|
|
|
|
|
|
|
unlock:
|
2009-09-02 03:14:16 +04:00
|
|
|
spin_unlock(&fep->mii_lock);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
2009-09-02 03:14:15 +04:00
|
|
|
mii_queue_unlocked(struct net_device *dev, int regval,
|
|
|
|
void (*func)(uint, struct net_device *))
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
|
|
|
struct fec_enet_private *fep;
|
|
|
|
mii_list_t *mip;
|
|
|
|
int retval;
|
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* Add PHY address to register command */
|
2005-04-17 02:20:36 +04:00
|
|
|
fep = netdev_priv(dev);
|
|
|
|
|
2008-05-01 08:08:12 +04:00
|
|
|
regval |= fep->phy_addr << 23;
|
2005-04-17 02:20:36 +04:00
|
|
|
retval = 0;
|
|
|
|
|
|
|
|
if ((mip = mii_free) != NULL) {
|
|
|
|
mii_free = mip->mii_next;
|
|
|
|
mip->mii_regval = regval;
|
|
|
|
mip->mii_func = func;
|
|
|
|
mip->mii_next = NULL;
|
|
|
|
if (mii_head) {
|
|
|
|
mii_tail->mii_next = mip;
|
|
|
|
mii_tail = mip;
|
2007-10-23 08:37:54 +04:00
|
|
|
} else {
|
2005-04-17 02:20:36 +04:00
|
|
|
mii_head = mii_tail = mip;
|
2009-04-15 07:11:30 +04:00
|
|
|
writel(regval, fep->hwp + FEC_MII_DATA);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
2007-10-23 08:37:54 +04:00
|
|
|
} else {
|
2005-04-17 02:20:36 +04:00
|
|
|
retval = 1;
|
|
|
|
}
|
|
|
|
|
2009-09-02 03:14:15 +04:00
|
|
|
return retval;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
mii_queue(struct net_device *dev, int regval,
|
|
|
|
void (*func)(uint, struct net_device *))
|
|
|
|
{
|
|
|
|
struct fec_enet_private *fep;
|
|
|
|
unsigned long flags;
|
|
|
|
int retval;
|
|
|
|
fep = netdev_priv(dev);
|
|
|
|
spin_lock_irqsave(&fep->mii_lock, flags);
|
|
|
|
retval = mii_queue_unlocked(dev, regval, func);
|
2008-05-01 08:08:12 +04:00
|
|
|
spin_unlock_irqrestore(&fep->mii_lock, flags);
|
|
|
|
return retval;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
|
|
|
|
{
|
|
|
|
if(!c)
|
|
|
|
return;
|
|
|
|
|
2007-10-23 08:37:54 +04:00
|
|
|
for (; c->mii_data != mk_mii_end; c++)
|
|
|
|
mii_queue(dev, c->mii_data, c->funct);
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Parse the generic MII status register into the driver's phy_status word. */
static void mii_parse_sr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status = *s & ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);

	if (mii_reg & 0x0004)		/* link is up */
		status |= PHY_STAT_LINK;
	if (mii_reg & 0x0010)		/* remote fault */
		status |= PHY_STAT_FAULT;
	if (mii_reg & 0x0020)		/* auto-negotiation complete */
		status |= PHY_STAT_ANC;

	*s = status;
}
|
|
|
|
|
|
|
|
/* Parse the MII control register into the driver's phy_status config bits. */
static void mii_parse_cr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status = *s & ~(PHY_CONF_ANE | PHY_CONF_LOOP);

	if (mii_reg & 0x1000)		/* auto-negotiation enabled */
		status |= PHY_CONF_ANE;
	if (mii_reg & 0x4000)		/* loopback enabled */
		status |= PHY_CONF_LOOP;

	*s = status;
}
|
|
|
|
|
|
|
|
/*
 * Parse the MII auto-negotiation advertisement register into the
 * driver's phy_status speed/duplex configuration bits.
 */
static void mii_parse_anar(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status = *s & ~(PHY_CONF_SPMASK);

	if (mii_reg & 0x0020)
		status |= PHY_CONF_10HDX;
	if (mii_reg & 0x0040)
		status |= PHY_CONF_10FDX;
	if (mii_reg & 0x0080)
		status |= PHY_CONF_100HDX;
	if (mii_reg & 0x0100)
		status |= PHY_CONF_100FDX;

	*s = status;
}
|
|
|
|
|
|
|
|
/* ------------------------------------------------------------------------- */
/* The Level one LXT970 is used by many boards				     */

#define MII_LXT970_MIRROR    16  /* Mirror register           */
#define MII_LXT970_IER       17  /* Interrupt Enable Register */
#define MII_LXT970_ISR       18  /* Interrupt Status Register */
#define MII_LXT970_CONFIG    19  /* Configuration Register    */
#define MII_LXT970_CSR       20  /* Chip Status Register      */

/* Parse the LXT970 chip status register into phy_status speed/duplex bits. */
static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);
	if (mii_reg & 0x0800) {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x1000)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	*s = status;
}

/* Command lists for the LXT970, run by mii_do_cmd() at the matching events */
static phy_cmd_t const phy_cmd_lxt970_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_startup[] = { /* enable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_ack_int[] = {
		/* read SR and ISR to acknowledge */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT970_ISR), NULL },

		/* find out the current status */
		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt970_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
/* Probe/ops descriptor for the LXT970, referenced from phy_info[] */
static phy_info_t const phy_info_lxt970 = {
	.id = 0x07810000,
	.name = "LXT970",
	.config = phy_cmd_lxt970_config,
	.startup = phy_cmd_lxt970_startup,
	.ack_int = phy_cmd_lxt970_ack_int,
	.shutdown = phy_cmd_lxt970_shutdown
};
|
2006-09-13 21:24:59 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/* ------------------------------------------------------------------------- */
/* The Level one LXT971 is used on some of my custom boards                  */

/* register definitions for the 971 */

#define MII_LXT971_PCR       16  /* Port Control Register     */
#define MII_LXT971_SR2       17  /* Status Register 2         */
#define MII_LXT971_IER       18  /* Interrupt Enable Register */
#define MII_LXT971_ISR       19  /* Interrupt Status Register */
#define MII_LXT971_LCR       20  /* LED Control Register      */
#define MII_LXT971_TCR       30  /* Transmit Control Register */

/*
 * I had some nice ideas of running the MDIO faster...
 * The 971 should support 8MHz and I tried it, but things acted really
 * weird, so 2.5 MHz ought to be enough for anyone...
 */

/*
 * Parse LXT971 status register 2: updates fep->link and the phy_status
 * link/autoneg/speed/duplex/fault bits.
 */
static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);

	if (mii_reg & 0x0400) {
		fep->link = 1;
		status |= PHY_STAT_LINK;
	} else {
		fep->link = 0;
	}
	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x4000) {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_100FDX;
		else
			status |= PHY_STAT_100HDX;
	} else {
		if (mii_reg & 0x0200)
			status |= PHY_STAT_10FDX;
		else
			status |= PHY_STAT_10HDX;
	}
	if (mii_reg & 0x0008)
		status |= PHY_STAT_FAULT;

	*s = status;
}

/* Command lists for the LXT971, run by mii_do_cmd() at the matching events */
static phy_cmd_t const phy_cmd_lxt971_config[] = {
		/* limit to 10MBit because my prototype board
		 * doesn't work with 100. */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_write(MII_LXT971_LCR, 0xd422), NULL }, /* LED config */
		/* Somehow does the 971 tell me that the link is down
		 * the first read after power-up.
		 * read here to get a valid value in ack_int */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_ack_int[] = {
		/* acknowledge the int before reading status ! */
		{ mk_mii_read(MII_LXT971_ISR), NULL },
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_lxt971_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
		{ mk_mii_end, }
	};
/* Probe/ops descriptor for the LXT971, referenced from phy_info[] */
static phy_info_t const phy_info_lxt971 = {
	.id = 0x0001378e,
	.name = "LXT971",
	.config = phy_cmd_lxt971_config,
	.startup = phy_cmd_lxt971_startup,
	.ack_int = phy_cmd_lxt971_ack_int,
	.shutdown = phy_cmd_lxt971_shutdown
};
|
|
|
|
|
|
|
|
/* ------------------------------------------------------------------------- */
/* The Quality Semiconductor QS6612 is used on the RPX CLLF                  */

/* register definitions */

#define MII_QS6612_MCR       17  /* Mode Control Register      */
#define MII_QS6612_FTR       27  /* Factory Test Register      */
#define MII_QS6612_MCO       28  /* Misc. Control Register     */
#define MII_QS6612_ISR       29  /* Interrupt Source Register  */
#define MII_QS6612_IMR       30  /* Interrupt Mask Register    */
#define MII_QS6612_PCR       31  /* 100BaseTx PHY Control Reg. */

/*
 * Parse the QS6612 PHY control register: bits [4:2] encode the resolved
 * operating mode, mapped below onto the phy_status speed/duplex bits.
 */
static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK);

	switch((mii_reg >> 2) & 7) {
	case 1: status |= PHY_STAT_10HDX; break;
	case 2: status |= PHY_STAT_100HDX; break;
	case 5: status |= PHY_STAT_10FDX; break;
	case 6: status |= PHY_STAT_100FDX; break;
	}

	*s = status;
}

/* Command lists for the QS6612, run by mii_do_cmd() at the matching events */
static phy_cmd_t const phy_cmd_qs6612_config[] = {
		/* The PHY powers up isolated on the RPX,
		 * so send a command to allow operation.
		 */
		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },

		/* parse cr and anar to get some info */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_ack_int[] = {
		/* we need to read ISR, SR and ANER to acknowledge */
		{ mk_mii_read(MII_QS6612_ISR), NULL },
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_REG_ANER), NULL },

		/* read pcr to get info */
		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_qs6612_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
		{ mk_mii_end, }
	};
/* Probe/ops descriptor for the QS6612, referenced from phy_info[] */
static phy_info_t const phy_info_qs6612 = {
	.id = 0x00181440,
	.name = "QS6612",
	.config = phy_cmd_qs6612_config,
	.startup = phy_cmd_qs6612_startup,
	.ack_int = phy_cmd_qs6612_ack_int,
	.shutdown = phy_cmd_qs6612_shutdown
};
|
|
|
|
|
|
|
|
/* ------------------------------------------------------------------------- */
/* AMD AM79C874 phy                                                          */

/* register definitions for the 874 */

#define MII_AM79C874_MFR       16  /* Miscellaneous Feature Register */
#define MII_AM79C874_ICSR      17  /* Interrupt/Status Register      */
#define MII_AM79C874_DR        18  /* Diagnostic Register            */
#define MII_AM79C874_PMLR      19  /* Power and Loopback Register    */
#define MII_AM79C874_MCR       21  /* ModeControl Register           */
#define MII_AM79C874_DC        23  /* Disconnect Counter             */
#define MII_AM79C874_REC       24  /* Receive Error Counter          */

/* Parse the AM79C874 diagnostic register into phy_status bits. */
static void mii_parse_am79c874_dr(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	volatile uint *s = &(fep->phy_status);
	uint status;

	status = *s & ~(PHY_STAT_SPMASK | PHY_STAT_ANC);

	if (mii_reg & 0x0080)
		status |= PHY_STAT_ANC;
	if (mii_reg & 0x0400)
		status |= ((mii_reg & 0x0800) ? PHY_STAT_100FDX : PHY_STAT_100HDX);
	else
		status |= ((mii_reg & 0x0800) ? PHY_STAT_10FDX : PHY_STAT_10HDX);

	*s = status;
}

/* Command lists for the AM79C874, run by mii_do_cmd() at the matching events */
static phy_cmd_t const phy_cmd_am79c874_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_read(MII_AM79C874_DR), mii_parse_am79c874_dr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_AM79C874_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_am79c874_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
/* Probe/ops descriptor for the AM79C874, referenced from phy_info[] */
static phy_info_t const phy_info_am79c874 = {
	.id = 0x00022561,
	.name = "AM79C874",
	.config = phy_cmd_am79c874_config,
	.startup = phy_cmd_am79c874_startup,
	.ack_int = phy_cmd_am79c874_ack_int,
	.shutdown = phy_cmd_am79c874_shutdown
};
|
|
|
|
|
2005-09-12 05:18:10 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/* ------------------------------------------------------------------------- */
/* Kendin KS8721BL phy                                                       */

/* register definitions for the 8721 */

#define MII_KS8721BL_RXERCR	21
#define MII_KS8721BL_ICSR	27
#define MII_KS8721BL_PHYCR	31

/* Command lists for the KS8721BL, run by mii_do_cmd() at the matching events.
 * This PHY needs no chip-specific parser; generic CR/ANAR/SR parsing suffices. */
static phy_cmd_t const phy_cmd_ks8721bl_config[] = {
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_startup[] = {  /* enable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0xff00), NULL },
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_ack_int[] = {
		/* find out the current status */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		/* we only need to read ISR to acknowledge */
		{ mk_mii_read(MII_KS8721BL_ICSR), NULL },
		{ mk_mii_end, }
	};
static phy_cmd_t const phy_cmd_ks8721bl_shutdown[] = { /* disable interrupts */
		{ mk_mii_write(MII_KS8721BL_ICSR, 0x0000), NULL },
		{ mk_mii_end, }
	};
/* Probe/ops descriptor for the KS8721BL, referenced from phy_info[] */
static phy_info_t const phy_info_ks8721bl = {
	.id = 0x00022161,
	.name = "KS8721BL",
	.config = phy_cmd_ks8721bl_config,
	.startup = phy_cmd_ks8721bl_startup,
	.ack_int = phy_cmd_ks8721bl_ack_int,
	.shutdown = phy_cmd_ks8721bl_shutdown
};
|
|
|
|
|
2005-11-07 07:09:50 +03:00
|
|
|
/* ------------------------------------------------------------------------- */
|
|
|
|
/* register definitions for the DP83848 */
|
|
|
|
|
|
|
|
#define MII_DP8384X_PHYSTST 16 /* PHY Status Register */
|
|
|
|
|
|
|
|
static void mii_parse_dp8384x_sr2(uint mii_reg, struct net_device *dev)
|
|
|
|
{
|
2008-11-13 10:38:14 +03:00
|
|
|
struct fec_enet_private *fep = netdev_priv(dev);
|
2005-11-07 07:09:50 +03:00
|
|
|
volatile uint *s = &(fep->phy_status);
|
|
|
|
|
|
|
|
*s &= ~(PHY_STAT_SPMASK | PHY_STAT_LINK | PHY_STAT_ANC);
|
|
|
|
|
|
|
|
/* Link up */
|
|
|
|
if (mii_reg & 0x0001) {
|
|
|
|
fep->link = 1;
|
|
|
|
*s |= PHY_STAT_LINK;
|
|
|
|
} else
|
|
|
|
fep->link = 0;
|
|
|
|
/* Status of link */
|
|
|
|
if (mii_reg & 0x0010) /* Autonegotioation complete */
|
|
|
|
*s |= PHY_STAT_ANC;
|
|
|
|
if (mii_reg & 0x0002) { /* 10MBps? */
|
|
|
|
if (mii_reg & 0x0004) /* Full Duplex? */
|
|
|
|
*s |= PHY_STAT_10FDX;
|
|
|
|
else
|
|
|
|
*s |= PHY_STAT_10HDX;
|
|
|
|
} else { /* 100 Mbps? */
|
|
|
|
if (mii_reg & 0x0004) /* Full Duplex? */
|
|
|
|
*s |= PHY_STAT_100FDX;
|
|
|
|
else
|
|
|
|
*s |= PHY_STAT_100HDX;
|
|
|
|
}
|
|
|
|
if (mii_reg & 0x0008)
|
|
|
|
*s |= PHY_STAT_FAULT;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Probe/ops descriptor for the DP83848, referenced from phy_info[].
 * Uses positional initializers: id, name, config, startup, ack_int, shutdown. */
static phy_info_t phy_info_dp83848= {
	0x020005c9,
	"DP83848",

	(const phy_cmd_t []) {  /* config */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_read(MII_DP8384X_PHYSTST), mii_parse_dp8384x_sr2 },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* startup - enable interrupts */
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* ack_int - never happens, no interrupt */
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) {  /* shutdown */
		{ mk_mii_end, }
	},
};
|
|
|
|
|
2010-02-05 11:56:21 +03:00
|
|
|
/* Probe/ops descriptor for the SMSC LAN8700, referenced from phy_info[].
 * Uses positional initializers: id, name, config, startup, ack_int, shutdown. */
static phy_info_t phy_info_lan8700 = {
	0x0007C0C,	/* NOTE(review): only 7 hex digits, one short of the
			 * other 8-digit ids above - verify this matches the
			 * value the PHY discovery code compares against. */
	"LAN8700",
	(const phy_cmd_t []) { /* config */
		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* startup */
		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* act_int */
		{ mk_mii_end, }
	},
	(const phy_cmd_t []) { /* shutdown */
		{ mk_mii_end, }
	},
};
|
2005-04-17 02:20:36 +04:00
|
|
|
/* ------------------------------------------------------------------------- */

/* NULL-terminated table of all supported PHY descriptors, scanned during
 * PHY discovery to match the id read from the device. */
static phy_info_t const * const phy_info[] = {
	&phy_info_lxt970,
	&phy_info_lxt971,
	&phy_info_qs6612,
	&phy_info_am79c874,
	&phy_info_ks8721bl,
	&phy_info_dp83848,
	&phy_info_lan8700,
	NULL
};
|
|
|
|
|
|
|
|
/* ------------------------------------------------------------------------- */
#ifdef HAVE_mii_link_interrupt
/* Forward declaration; the handler is defined later in this file. */
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id);

/*
 *	This is specific to the MII interrupt setup of the M5272EVB.
 */
/* Hook the board's fixed MII link-change interrupt (IRQ 66) to our handler. */
static void __inline__ fec_request_mii_intr(struct net_device *dev)
{
	if (request_irq(66, mii_link_interrupt, IRQF_DISABLED, "fec(MII)", dev) != 0)
		printk("FEC: Could not allocate fec(MII) IRQ(66)!\n");
}

/* Release the MII link-change interrupt acquired above. */
static void __inline__ fec_disable_phy_intr(struct net_device *dev)
{
	free_irq(66, dev);
}
#endif
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-02-27 09:42:51 +03:00
|
|
|
#ifdef CONFIG_M5272
/*
 * Set dev->dev_addr from the FLASH-resident MAC (when FEC_FLASHMAC is
 * configured) or from the FEC hardware address registers, falling back
 * to fec_mac_default when the FLASH address is all-zeros or all-ones.
 * When the default is used, the last byte is offset by the controller
 * index so multiple FECs get distinct addresses.
 */
static void __inline__ fec_get_mac(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	unsigned char *iap, tmpaddr[ETH_ALEN];

	if (FEC_FLASHMAC) {
		/*
		 * Get MAC address from FLASH.
		 * If it is all 1's or 0's, use the default.
		 * (Use the etherdevice.h helpers instead of hand-rolled
		 * per-byte comparisons.)
		 */
		iap = (unsigned char *)FEC_FLASHMAC;
		if (is_zero_ether_addr(iap) || is_broadcast_ether_addr(iap))
			iap = fec_mac_default;
	} else {
		/* Read the address the bootloader programmed into the FEC */
		*((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW);
		*((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
		iap = &tmpaddr[0];
	}

	memcpy(dev->dev_addr, iap, ETH_ALEN);

	/* Adjust MAC if using default MAC address */
	if (iap == fec_mac_default)
		dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index;
}
#endif
|
|
|
|
|
|
|
|
/* ------------------------------------------------------------------------- */
|
|
|
|
|
|
|
|
static void mii_display_status(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct fec_enet_private *fep = netdev_priv(dev);
|
|
|
|
volatile uint *s = &(fep->phy_status);
|
|
|
|
|
|
|
|
if (!fep->link && !fep->old_link) {
|
|
|
|
/* Link is still down - don't print anything */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
printk("%s: status: ", dev->name);
|
|
|
|
|
|
|
|
if (!fep->link) {
|
|
|
|
printk("link down");
|
|
|
|
} else {
|
|
|
|
printk("link up");
|
|
|
|
|
|
|
|
switch(*s & PHY_STAT_SPMASK) {
|
|
|
|
case PHY_STAT_100FDX: printk(", 100MBit Full Duplex"); break;
|
|
|
|
case PHY_STAT_100HDX: printk(", 100MBit Half Duplex"); break;
|
|
|
|
case PHY_STAT_10FDX: printk(", 10MBit Full Duplex"); break;
|
|
|
|
case PHY_STAT_10HDX: printk(", 10MBit Half Duplex"); break;
|
|
|
|
default:
|
|
|
|
printk(", Unknown speed/duplex");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (*s & PHY_STAT_ANC)
|
|
|
|
printk(", auto-negotiation complete");
|
|
|
|
}
|
|
|
|
|
|
|
|
if (*s & PHY_STAT_FAULT)
|
|
|
|
printk(", remote fault");
|
|
|
|
|
|
|
|
printk(".\n");
|
|
|
|
}
|
|
|
|
|
2007-07-30 10:29:09 +04:00
|
|
|
static void mii_display_config(struct work_struct *work)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2007-07-30 10:29:09 +04:00
|
|
|
struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
|
|
|
|
struct net_device *dev = fep->netdev;
|
2005-09-12 05:18:10 +04:00
|
|
|
uint status = fep->phy_status;
|
2005-04-17 02:20:36 +04:00
|
|
|
|
|
|
|
/*
|
|
|
|
** When we get here, phy_task is already removed from
|
|
|
|
** the workqueue. It is thus safe to allow to reuse it.
|
|
|
|
*/
|
|
|
|
fep->mii_phy_task_queued = 0;
|
|
|
|
printk("%s: config: auto-negotiation ", dev->name);
|
|
|
|
|
2005-09-12 05:18:10 +04:00
|
|
|
if (status & PHY_CONF_ANE)
|
2005-04-17 02:20:36 +04:00
|
|
|
printk("on");
|
|
|
|
else
|
|
|
|
printk("off");
|
|
|
|
|
2005-09-12 05:18:10 +04:00
|
|
|
if (status & PHY_CONF_100FDX)
|
2005-04-17 02:20:36 +04:00
|
|
|
printk(", 100FDX");
|
2005-09-12 05:18:10 +04:00
|
|
|
if (status & PHY_CONF_100HDX)
|
2005-04-17 02:20:36 +04:00
|
|
|
printk(", 100HDX");
|
2005-09-12 05:18:10 +04:00
|
|
|
if (status & PHY_CONF_10FDX)
|
2005-04-17 02:20:36 +04:00
|
|
|
printk(", 10FDX");
|
2005-09-12 05:18:10 +04:00
|
|
|
if (status & PHY_CONF_10HDX)
|
2005-04-17 02:20:36 +04:00
|
|
|
printk(", 10HDX");
|
2005-09-12 05:18:10 +04:00
|
|
|
if (!(status & PHY_CONF_SPMASK))
|
2005-04-17 02:20:36 +04:00
|
|
|
printk(", No speed/duplex selected?");
|
|
|
|
|
2005-09-12 05:18:10 +04:00
|
|
|
if (status & PHY_CONF_LOOP)
|
2005-04-17 02:20:36 +04:00
|
|
|
printk(", loopback enabled");
|
2006-09-13 21:24:59 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
printk(".\n");
|
|
|
|
|
|
|
|
fep->sequence_done = 1;
|
|
|
|
}
|
|
|
|
|
2007-07-30 10:29:09 +04:00
|
|
|
static void mii_relink(struct work_struct *work)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2007-07-30 10:29:09 +04:00
|
|
|
struct fec_enet_private *fep = container_of(work, struct fec_enet_private, phy_task);
|
|
|
|
struct net_device *dev = fep->netdev;
|
2005-04-17 02:20:36 +04:00
|
|
|
int duplex;
|
|
|
|
|
|
|
|
/*
|
|
|
|
** When we get here, phy_task is already removed from
|
|
|
|
** the workqueue. It is thus safe to allow to reuse it.
|
|
|
|
*/
|
|
|
|
fep->mii_phy_task_queued = 0;
|
|
|
|
fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
|
|
|
|
mii_display_status(dev);
|
|
|
|
fep->old_link = fep->link;
|
|
|
|
|
|
|
|
if (fep->link) {
|
|
|
|
duplex = 0;
|
2006-09-13 21:24:59 +04:00
|
|
|
if (fep->phy_status
|
2005-04-17 02:20:36 +04:00
|
|
|
& (PHY_STAT_100FDX | PHY_STAT_10FDX))
|
|
|
|
duplex = 1;
|
|
|
|
fec_restart(dev, duplex);
|
2007-10-23 08:37:54 +04:00
|
|
|
} else
|
2005-04-17 02:20:36 +04:00
|
|
|
fec_stop(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* mii_queue_relink is called in interrupt context from mii_link_interrupt */
static void mii_queue_relink(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/*
	 * We cannot queue phy_task twice in the workqueue.  It
	 * would cause an endless loop in the workqueue.
	 * Fortunately, if the last mii_relink entry has not yet been
	 * executed now, it will do the job for the current interrupt,
	 * which is just what we want.
	 */
	if (fep->mii_phy_task_queued)
		return;

	/* Flag is cleared by mii_relink() once the work item has run. */
	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, mii_relink);
	schedule_work(&fep->phy_task);
}
|
|
|
|
|
2005-09-12 05:18:10 +04:00
|
|
|
/* mii_queue_config is called in interrupt context from fec_enet_mii */
static void mii_queue_config(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Same single-queueing rule as mii_queue_relink(): phy_task must
	 * not be placed on the workqueue twice.
	 */
	if (fep->mii_phy_task_queued)
		return;

	/* Flag is cleared by mii_display_config() when the work runs. */
	fep->mii_phy_task_queued = 1;
	INIT_WORK(&fep->phy_task, mii_display_config);
	schedule_work(&fep->phy_task);
}
|
|
|
|
|
2005-09-12 05:18:10 +04:00
|
|
|
/* MII command sequences: read the control register, then hand the
 * result to the matching interrupt-context queueing callback.
 * Each list is terminated by mk_mii_end.
 */
phy_cmd_t const phy_cmd_relink[] = {
	{ mk_mii_read(MII_REG_CR), mii_queue_relink },
	{ mk_mii_end, }
	};
phy_cmd_t const phy_cmd_config[] = {
	{ mk_mii_read(MII_REG_CR), mii_queue_config },
	{ mk_mii_end, }
	};
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* Read remainder of PHY ID. */
|
2005-04-17 02:20:36 +04:00
|
|
|
static void
|
|
|
|
mii_discover_phy3(uint mii_reg, struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct fec_enet_private *fep;
|
|
|
|
int i;
|
|
|
|
|
|
|
|
fep = netdev_priv(dev);
|
|
|
|
fep->phy_id |= (mii_reg & 0xffff);
|
|
|
|
printk("fec: PHY @ 0x%x, ID 0x%08x", fep->phy_addr, fep->phy_id);
|
|
|
|
|
|
|
|
for(i = 0; phy_info[i]; i++) {
|
|
|
|
if(phy_info[i]->id == (fep->phy_id >> 4))
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (phy_info[i])
|
|
|
|
printk(" -- %s\n", phy_info[i]->name);
|
|
|
|
else
|
|
|
|
printk(" -- unknown PHY!\n");
|
2006-09-13 21:24:59 +04:00
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
fep->phy = phy_info[i];
|
|
|
|
fep->phy_id_done = 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Scan all of the MII PHY addresses looking for someone to respond
 * with a valid ID. This usually happens quickly.
 *
 * Self-requeueing state machine: each MII read completion either moves
 * on to mii_discover_phy3 (ID found) or re-queues itself at the next
 * PHY address, until address 32 is reached.
 */
static void
mii_discover_phy(uint mii_reg, struct net_device *dev)
{
	struct fec_enet_private *fep;
	uint phytype;

	fep = netdev_priv(dev);

	if (fep->phy_addr < 32) {
		/* 0x0000 and 0xffff both mean "nothing answered here". */
		if ((phytype = (mii_reg & 0xffff)) != 0xffff && phytype != 0) {

			/* Got first part of ID, now get remainder */
			fep->phy_id = phytype << 16;
			mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR2),
							mii_discover_phy3);
		} else {
			/* No response: try the next PHY address. */
			fep->phy_addr++;
			mii_queue_unlocked(dev, mk_mii_read(MII_REG_PHYIR1),
							mii_discover_phy);
		}
	} else {
		printk("FEC: No PHY device found.\n");
		/* Disable external MII interface */
		writel(0, fep->hwp + FEC_MII_SPEED);
		fep->phy_speed = 0;
#ifdef HAVE_mii_link_interrupt
		fec_disable_phy_intr(dev);
#endif
	}
}
|
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* This interrupt occurs when the PHY detects a link change */
#ifdef HAVE_mii_link_interrupt
static irqreturn_t
mii_link_interrupt(int irq, void * dev_id)
{
	struct net_device *dev = dev_id;
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Acknowledge the PHY's interrupt, then queue the MII read that
	 * ends in mii_queue_relink() scheduling the relink work.
	 */
	mii_do_cmd(dev, fep->phy->ack_int);
	mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */

	return IRQ_HANDLED;
}
#endif
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-04-15 05:32:24 +04:00
|
|
|
static void fec_enet_free_buffers(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct fec_enet_private *fep = netdev_priv(dev);
|
|
|
|
int i;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
struct bufdesc *bdp;
|
|
|
|
|
|
|
|
bdp = fep->rx_bd_base;
|
|
|
|
for (i = 0; i < RX_RING_SIZE; i++) {
|
|
|
|
skb = fep->rx_skbuff[i];
|
|
|
|
|
|
|
|
if (bdp->cbd_bufaddr)
|
|
|
|
dma_unmap_single(&dev->dev, bdp->cbd_bufaddr,
|
|
|
|
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
|
|
|
|
if (skb)
|
|
|
|
dev_kfree_skb(skb);
|
|
|
|
bdp++;
|
|
|
|
}
|
|
|
|
|
|
|
|
bdp = fep->tx_bd_base;
|
|
|
|
for (i = 0; i < TX_RING_SIZE; i++)
|
|
|
|
kfree(fep->tx_bounce[i]);
|
|
|
|
}
|
|
|
|
|
|
|
|
static int fec_enet_alloc_buffers(struct net_device *dev)
|
|
|
|
{
|
|
|
|
struct fec_enet_private *fep = netdev_priv(dev);
|
|
|
|
int i;
|
|
|
|
struct sk_buff *skb;
|
|
|
|
struct bufdesc *bdp;
|
|
|
|
|
|
|
|
bdp = fep->rx_bd_base;
|
|
|
|
for (i = 0; i < RX_RING_SIZE; i++) {
|
|
|
|
skb = dev_alloc_skb(FEC_ENET_RX_FRSIZE);
|
|
|
|
if (!skb) {
|
|
|
|
fec_enet_free_buffers(dev);
|
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
fep->rx_skbuff[i] = skb;
|
|
|
|
|
|
|
|
bdp->cbd_bufaddr = dma_map_single(&dev->dev, skb->data,
|
|
|
|
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
|
|
|
|
bdp->cbd_sc = BD_ENET_RX_EMPTY;
|
|
|
|
bdp++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set the last buffer to wrap. */
|
|
|
|
bdp--;
|
|
|
|
bdp->cbd_sc |= BD_SC_WRAP;
|
|
|
|
|
|
|
|
bdp = fep->tx_bd_base;
|
|
|
|
for (i = 0; i < TX_RING_SIZE; i++) {
|
|
|
|
fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
|
|
|
|
|
|
|
|
bdp->cbd_sc = 0;
|
|
|
|
bdp->cbd_bufaddr = 0;
|
|
|
|
bdp++;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set the last buffer to wrap. */
|
|
|
|
bdp--;
|
|
|
|
bdp->cbd_sc |= BD_SC_WRAP;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/* ndo_open: allocate the rings, start the controller, and run the PHY
 * configuration sequence.  Returns 0 or a negative errno.
 */
static int
fec_enet_open(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int ret;

	/* I should reset the ring buffers here, but I don't yet know
	 * a simple way to do that.
	 */

	ret = fec_enet_alloc_buffers(dev);
	if (ret)
		return ret;

	/* sequence_done is set by mii_display_config() from the
	 * workqueue once the PHY config reads finish.
	 */
	fep->sequence_done = 0;
	fep->link = 0;

	fec_restart(dev, 1);

	if (fep->phy) {
		mii_do_cmd(dev, fep->phy->ack_int);
		mii_do_cmd(dev, fep->phy->config);
		mii_do_cmd(dev, phy_cmd_config);  /* display configuration */

		/* Poll until the PHY tells us its configuration
		 * (not link state).
		 * Request is initiated by mii_do_cmd above, but answer
		 * comes by interrupt.
		 * This should take about 25 usec per register at 2.5 MHz,
		 * and we read approximately 5 registers.
		 */
		while(!fep->sequence_done)
			schedule();

		mii_do_cmd(dev, fep->phy->startup);
	}

	/* Set the initial link state to true. A lot of hardware
	 * based on this device does not implement a PHY interrupt,
	 * so we are never notified of link change.
	 */
	fep->link = 1;

	netif_start_queue(dev);
	fep->opened = 1;
	return 0;
}
|
|
|
|
|
|
|
|
/* ndo_stop: quiesce the queue, stop the controller, and release the
 * ring buffers.  Always returns 0.
 */
static int
fec_enet_close(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* Don't know what to do yet. */
	fep->opened = 0;
	netif_stop_queue(dev);
	fec_stop(dev);

	fec_enet_free_buffers(dev);

	return 0;
}
|
|
|
|
|
|
|
|
/* Set or clear the multicast filter for this adaptor.
 * Skeleton taken from sunlance driver.
 * The CPM Ethernet implementation allows Multicast as well as individual
 * MAC address filtering.  Some of the drivers check to make sure it is
 * a group multicast address, and discard those that are not.  I guess I
 * will do the same for now, but just remove the test if you want
 * individual filtering as well (do the upper net layers want or support
 * this kind of feature?).
 */

#define HASH_BITS	6		/* #bits in hash */
#define CRC32_POLY	0xEDB88320

static void set_multicast_list(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct dev_mc_list *dmi;
	unsigned int i, bit, data, crc, tmp;
	unsigned char hash;

	/* NOTE(review): bit 0x8 in R_CNTRL appears to be the promiscuous
	 * enable - confirm against the FEC reference manual.
	 */
	if (dev->flags & IFF_PROMISC) {
		tmp = readl(fep->hwp + FEC_R_CNTRL);
		tmp |= 0x8;
		writel(tmp, fep->hwp + FEC_R_CNTRL);
		return;
	}

	tmp = readl(fep->hwp + FEC_R_CNTRL);
	tmp &= ~0x8;
	writel(tmp, fep->hwp + FEC_R_CNTRL);

	if (dev->flags & IFF_ALLMULTI) {
		/* Catch all multicast addresses, so set the
		 * filter to all 1's
		 */
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

		return;
	}

	/* Clear filter and add the addresses in hash register
	 */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);

	netdev_for_each_mc_addr(dmi, dev) {
		/* Only support group multicast for now */
		if (!(dmi->dmi_addr[0] & 1))
			continue;

		/* calculate crc32 value of mac address (bitwise,
		 * reflected CRC-32)
		 */
		crc = 0xffffffff;

		for (i = 0; i < dmi->dmi_addrlen; i++) {
			data = dmi->dmi_addr[i];
			for (bit = 0; bit < 8; bit++, data >>= 1) {
				crc = (crc >> 1) ^
				(((crc ^ data) & 1) ? CRC32_POLY : 0);
			}
		}

		/* only upper 6 bits (HASH_BITS) are used
		 * which point to a specific bit in the hash registers
		 */
		hash = (crc >> (32 - HASH_BITS)) & 0x3f;

		/* hash 32..63 -> HIGH register, 0..31 -> LOW register */
		if (hash > 31) {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
			tmp |= 1 << (hash - 32);
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
		} else {
			tmp = readl(fep->hwp + FEC_GRP_HASH_TABLE_LOW);
			tmp |= 1 << hash;
			writel(tmp, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
		}
	}
}
|
|
|
|
|
2009-04-15 05:32:18 +04:00
|
|
|
/* Set a MAC change in hardware. */
|
2009-04-15 05:32:23 +04:00
|
|
|
static int
|
|
|
|
fec_set_mac_address(struct net_device *dev, void *p)
|
2005-04-17 02:20:36 +04:00
|
|
|
{
|
2009-04-15 07:11:30 +04:00
|
|
|
struct fec_enet_private *fep = netdev_priv(dev);
|
2009-04-15 05:32:23 +04:00
|
|
|
struct sockaddr *addr = p;
|
|
|
|
|
|
|
|
if (!is_valid_ether_addr(addr->sa_data))
|
|
|
|
return -EADDRNOTAVAIL;
|
|
|
|
|
|
|
|
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
|
2005-04-17 02:20:36 +04:00
|
|
|
|
2009-04-15 07:11:30 +04:00
|
|
|
writel(dev->dev_addr[3] | (dev->dev_addr[2] << 8) |
|
|
|
|
(dev->dev_addr[1] << 16) | (dev->dev_addr[0] << 24),
|
|
|
|
fep->hwp + FEC_ADDR_LOW);
|
|
|
|
writel((dev->dev_addr[5] << 16) | (dev->dev_addr[4] << 24),
|
|
|
|
fep + FEC_ADDR_HIGH);
|
2009-04-15 05:32:23 +04:00
|
|
|
return 0;
|
2005-04-17 02:20:36 +04:00
|
|
|
}
|
|
|
|
|
2009-04-15 05:32:23 +04:00
|
|
|
/* Standard net_device_ops hookup for the FEC interface. */
static const struct net_device_ops fec_netdev_ops = {
	.ndo_open		= fec_enet_open,
	.ndo_stop		= fec_enet_close,
	.ndo_start_xmit		= fec_enet_start_xmit,
	.ndo_set_multicast_list = set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= fec_timeout,
	.ndo_set_mac_address	= fec_set_mac_address,
};
|
|
|
|
|
2005-04-17 02:20:36 +04:00
|
|
|
/*
 * XXX:  We need to clean up on failure exits here.
 *
 * index is only used in legacy code
 *
 * One-time initialization: allocates the descriptor ring, reads the
 * MAC address from the hardware (or platform on M5272), wires up the
 * netdev ops, programs the MII clock divider, zeroes both rings, and
 * kicks off PHY discovery.  Returns 0 or -ENOMEM.
 */
static int fec_enet_init(struct net_device *dev, int index)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	struct bufdesc *cbd_base;
	struct bufdesc *bdp;
	int i;

	/* Allocate memory for buffer descriptors. */
	cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
			GFP_KERNEL);
	if (!cbd_base) {
		printk("FEC: allocate descriptor memory failed?\n");
		return -ENOMEM;
	}

	spin_lock_init(&fep->hw_lock);
	spin_lock_init(&fep->mii_lock);

	fep->index = index;
	fep->hwp = (void __iomem *)dev->base_addr;
	fep->netdev = dev;

	/* Set the Ethernet address */
#ifdef CONFIG_M5272
	fec_get_mac(dev);
#else
	{
		/* Recover the MAC the boot loader left in the
		 * address registers.
		 */
		unsigned long l;
		l = readl(fep->hwp + FEC_ADDR_LOW);
		dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
		dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
		dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
		l = readl(fep->hwp + FEC_ADDR_HIGH);
		dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
		dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
	}
#endif

	/* Set receive and transmit descriptor base. */
	fep->rx_bd_base = cbd_base;
	fep->tx_bd_base = cbd_base + RX_RING_SIZE;

#ifdef HAVE_mii_link_interrupt
	fec_request_mii_intr(dev);
#endif
	/* The FEC Ethernet specific entries in the device structure */
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->netdev_ops = &fec_netdev_ops;

	/* Chain the MII command buffers into a free list. */
	for (i=0; i<NMII-1; i++)
		mii_cmds[i].mii_next = &mii_cmds[i+1];
	mii_free = mii_cmds;

	/* Set MII speed to 2.5 MHz */
	fep->phy_speed = ((((clk_get_rate(fep->clk) / 2 + 4999999)
					/ 2500000) / 2) & 0x3F) << 1;

	/* Initialize the receive buffer descriptors. */
	bdp = fep->rx_bd_base;
	for (i = 0; i < RX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	/* ...and the same for transmit */
	bdp = fep->tx_bd_base;
	for (i = 0; i < TX_RING_SIZE; i++) {

		/* Initialize the BD for every fragment in the page. */
		bdp->cbd_sc = 0;
		bdp->cbd_bufaddr = 0;
		bdp++;
	}

	/* Set the last buffer to wrap */
	bdp--;
	bdp->cbd_sc |= BD_SC_WRAP;

	fec_restart(dev, 0);

	/* Queue up command to detect the PHY and initialize the
	 * remainder of the interface.
	 */
	fep->phy_id_done = 0;
	fep->phy_addr = 0;
	mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);

	return 0;
}
|
|
|
|
|
|
|
|
/* This function is called to start or restart the FEC during a link
 * change.  This only happens when switching between half and full
 * duplex.
 *
 * The register programming below follows a strict sequence: reset,
 * ack interrupts, clear hash filters, program buffer sizes and ring
 * bases, drop any in-flight TX skbs, select duplex mode, set the MII
 * clock, and finally enable the controller and its interrupts.
 */
static void
fec_restart(struct net_device *dev, int duplex)
{
	struct fec_enet_private *fep = netdev_priv(dev);
	int i;

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear any outstanding interrupt. */
	writel(0xffc00000, fep->hwp + FEC_IEVENT);

	/* Reset all multicast. */
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
#ifndef CONFIG_M5272
	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
#endif

	/* Set maximum receive buffer size. */
	writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);

	/* Set receive and transmit descriptor base.  TX ring follows the
	 * RX ring in the same DMA allocation.
	 */
	writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
	writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) * RX_RING_SIZE,
			fep->hwp + FEC_X_DES_START);

	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
	fep->cur_rx = fep->rx_bd_base;

	/* Reset SKB transmit buffers. */
	fep->skb_cur = fep->skb_dirty = 0;
	for (i = 0; i <= TX_RING_MOD_MASK; i++) {
		if (fep->tx_skbuff[i]) {
			dev_kfree_skb_any(fep->tx_skbuff[i]);
			fep->tx_skbuff[i] = NULL;
		}
	}

	/* Enable MII mode */
	if (duplex) {
		/* MII enable / FD enable */
		writel(OPT_FRAME_SIZE | 0x04, fep->hwp + FEC_R_CNTRL);
		writel(0x04, fep->hwp + FEC_X_CNTRL);
	} else {
		/* MII enable / No Rcv on Xmit */
		writel(OPT_FRAME_SIZE | 0x06, fep->hwp + FEC_R_CNTRL);
		writel(0x0, fep->hwp + FEC_X_CNTRL);
	}
	fep->full_duplex = duplex;

	/* Set MII speed */
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);

	/* And last, enable the transmit and receive processing */
	writel(2, fep->hwp + FEC_ECNTRL);
	writel(0, fep->hwp + FEC_R_DES_ACTIVE);

	/* Enable interrupts we wish to service */
	writel(FEC_ENET_TXF | FEC_ENET_RXF | FEC_ENET_MII,
			fep->hwp + FEC_IMASK);
}
|
|
|
|
|
|
|
|
/* Stop the controller: attempt a graceful transmit stop (only possible
 * while the link is up), then hard-reset the block.  MII access is kept
 * alive afterwards so PHY management still works while stopped.
 */
static void
fec_stop(struct net_device *dev)
{
	struct fec_enet_private *fep = netdev_priv(dev);

	/* We cannot expect a graceful transmit stop without link !!! */
	if (fep->link) {
		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
		udelay(10);
		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
			printk("fec_stop : Graceful transmit stop did not complete !\n");
	}

	/* Whack a reset.  We should wait for this. */
	writel(1, fep->hwp + FEC_ECNTRL);
	udelay(10);

	/* Clear outstanding MII command interrupts. */
	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);

	/* Keep only the MII interrupt enabled and restore the MII clock
	 * (the reset above cleared it) so PHY I/O keeps working.
	 */
	writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
}
|
|
|
|
|
2009-01-29 02:03:11 +03:00
|
|
|
static int __devinit
|
|
|
|
fec_probe(struct platform_device *pdev)
|
|
|
|
{
|
|
|
|
struct fec_enet_private *fep;
|
|
|
|
struct net_device *ndev;
|
|
|
|
int i, irq, ret = 0;
|
|
|
|
struct resource *r;
|
|
|
|
|
|
|
|
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
|
|
|
if (!r)
|
|
|
|
return -ENXIO;
|
|
|
|
|
|
|
|
r = request_mem_region(r->start, resource_size(r), pdev->name);
|
|
|
|
if (!r)
|
|
|
|
return -EBUSY;
|
|
|
|
|
|
|
|
/* Init network device */
|
|
|
|
ndev = alloc_etherdev(sizeof(struct fec_enet_private));
|
|
|
|
if (!ndev)
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
SET_NETDEV_DEV(ndev, &pdev->dev);
|
|
|
|
|
|
|
|
/* setup board info structure */
|
|
|
|
fep = netdev_priv(ndev);
|
|
|
|
memset(fep, 0, sizeof(*fep));
|
|
|
|
|
|
|
|
ndev->base_addr = (unsigned long)ioremap(r->start, resource_size(r));
|
|
|
|
|
|
|
|
if (!ndev->base_addr) {
|
|
|
|
ret = -ENOMEM;
|
|
|
|
goto failed_ioremap;
|
|
|
|
}
|
|
|
|
|
|
|
|
platform_set_drvdata(pdev, ndev);
|
|
|
|
|
|
|
|
/* This device has up to three irqs on some platforms */
|
|
|
|
for (i = 0; i < 3; i++) {
|
|
|
|
irq = platform_get_irq(pdev, i);
|
|
|
|
if (i && irq < 0)
|
|
|
|
break;
|
|
|
|
ret = request_irq(irq, fec_enet_interrupt, IRQF_DISABLED, pdev->name, ndev);
|
|
|
|
if (ret) {
|
|
|
|
while (i >= 0) {
|
|
|
|
irq = platform_get_irq(pdev, i);
|
|
|
|
free_irq(irq, ndev);
|
|
|
|
i--;
|
|
|
|
}
|
|
|
|
goto failed_irq;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fep->clk = clk_get(&pdev->dev, "fec_clk");
|
|
|
|
if (IS_ERR(fep->clk)) {
|
|
|
|
ret = PTR_ERR(fep->clk);
|
|
|
|
goto failed_clk;
|
|
|
|
}
|
|
|
|
clk_enable(fep->clk);
|
|
|
|
|
|
|
|
ret = fec_enet_init(ndev, 0);
|
|
|
|
if (ret)
|
|
|
|
goto failed_init;
|
|
|
|
|
|
|
|
ret = register_netdev(ndev);
|
|
|
|
if (ret)
|
|
|
|
goto failed_register;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
failed_register:
|
|
|
|
failed_init:
|
|
|
|
clk_disable(fep->clk);
|
|
|
|
clk_put(fep->clk);
|
|
|
|
failed_clk:
|
|
|
|
for (i = 0; i < 3; i++) {
|
|
|
|
irq = platform_get_irq(pdev, i);
|
|
|
|
if (irq > 0)
|
|
|
|
free_irq(irq, ndev);
|
|
|
|
}
|
|
|
|
failed_irq:
|
|
|
|
iounmap((void __iomem *)ndev->base_addr);
|
|
|
|
failed_ioremap:
|
|
|
|
free_netdev(ndev);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int __devexit
|
|
|
|
fec_drv_remove(struct platform_device *pdev)
|
|
|
|
{
|
|
|
|
struct net_device *ndev = platform_get_drvdata(pdev);
|
|
|
|
struct fec_enet_private *fep = netdev_priv(ndev);
|
|
|
|
|
|
|
|
platform_set_drvdata(pdev, NULL);
|
|
|
|
|
|
|
|
fec_stop(ndev);
|
|
|
|
clk_disable(fep->clk);
|
|
|
|
clk_put(fep->clk);
|
|
|
|
iounmap((void __iomem *)ndev->base_addr);
|
|
|
|
unregister_netdev(ndev);
|
|
|
|
free_netdev(ndev);
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
fec_suspend(struct platform_device *dev, pm_message_t state)
|
|
|
|
{
|
|
|
|
struct net_device *ndev = platform_get_drvdata(dev);
|
|
|
|
struct fec_enet_private *fep;
|
|
|
|
|
|
|
|
if (ndev) {
|
|
|
|
fep = netdev_priv(ndev);
|
|
|
|
if (netif_running(ndev)) {
|
|
|
|
netif_device_detach(ndev);
|
|
|
|
fec_stop(ndev);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Platform resume hook: re-initialize the controller and re-attach the
 * interface if it was running at suspend time.  Always returns 0.
 */
static int
fec_resume(struct platform_device *dev)
{
	struct net_device *ndev = platform_get_drvdata(dev);

	if (!ndev || !netif_running(ndev))
		return 0;

	/* NOTE(review): fec_enet_init() can fail (-ENOMEM) but its
	 * return value is ignored here, as in the original - confirm
	 * whether resume should propagate it.
	 */
	fec_enet_init(ndev, 0);
	netif_device_attach(ndev);
	return 0;
}
|
|
|
|
|
|
|
|
/* Platform driver glue; bound by name to the "fec" platform device. */
static struct platform_driver fec_driver = {
	.driver	= {
		.name    = "fec",
		.owner	 = THIS_MODULE,
	},
	.probe	= fec_probe,
	.remove	= __devexit_p(fec_drv_remove),
	.suspend = fec_suspend,
	.resume	= fec_resume,
};
|
|
|
|
|
|
|
|
/* Module entry point: announce the driver and register it with the
 * platform bus.
 */
static int __init
fec_enet_module_init(void)
{
	printk(KERN_INFO "FEC Ethernet Driver\n");

	return platform_driver_register(&fec_driver);
}
|
|
|
|
|
|
|
|
/* Module exit point: unregister the platform driver. */
static void __exit
fec_enet_cleanup(void)
{
	platform_driver_unregister(&fec_driver);
}
|
|
|
|
|
|
|
|
/* Module registration (exit listed first in the original source). */
module_exit(fec_enet_cleanup);
module_init(fec_enet_module_init);

MODULE_LICENSE("GPL");
|