Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26: (1090 commits)
  [NET]: Fix and allocate less memory for ->priv'less netdevices
  [IPV6]: Fix dangling references on error in fib6_add().
  [NETLABEL]: Fix NULL deref in netlbl_unlabel_staticlist_gen() if ifindex not found
  [PKT_SCHED]: Fix datalen check in tcf_simp_init().
  [INET]: Uninline the __inet_inherit_port call.
  [INET]: Drop the inet_inherit_port() call.
  SCTP: Initialize partial_bytes_acked to 0, when all of the data is acked.
  [netdrvr] forcedeth: internal simplifications; changelog removal
  phylib: factor out get_phy_id from within get_phy_device
  PHY: add BCM5464 support to broadcom PHY driver
  cxgb3: Fix __must_check warning with dev_dbg.
  tc35815: Statistics cleanup
  natsemi: fix MMIO for PPC 44x platforms
  [TIPC]: Cleanup of TIPC reference table code
  [TIPC]: Optimized initialization of TIPC reference table
  [TIPC]: Remove inlining of reference table locking routines
  e1000: convert uint16_t style integers to u16
  ixgb: convert uint16_t style integers to u16
  sb1000.c: make const arrays static
  sb1000.c: stop inlining largish static functions
  ...
This commit is contained in:
Linus Torvalds 2008-04-18 18:02:35 -07:00
Parent d1a4be630f d1643d24c6
Commit 334d094504
944 changed files: 82903 additions and 120177 deletions

View file

@ -11,7 +11,8 @@ DOCBOOKS := wanbook.xml z8530book.xml mcabook.xml videobook.xml \
procfs-guide.xml writing_usb_driver.xml networking.xml \
kernel-api.xml filesystems.xml lsm.xml usb.xml kgdb.xml \
gadget.xml libata.xml mtdnand.xml librs.xml rapidio.xml \
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml
genericirq.xml s390-drivers.xml uio-howto.xml scsi.xml \
mac80211.xml
###
# The build process is as follows (targets):

View file

@ -0,0 +1,335 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.1.2//EN"
"http://www.oasis-open.org/docbook/xml/4.1.2/docbookx.dtd" []>
<book id="mac80211-developers-guide">
<bookinfo>
<title>The mac80211 subsystem for kernel developers</title>
<authorgroup>
<author>
<firstname>Johannes</firstname>
<surname>Berg</surname>
<affiliation>
<address><email>johannes@sipsolutions.net</email></address>
</affiliation>
</author>
</authorgroup>
<copyright>
<year>2007</year>
<year>2008</year>
<holder>Johannes Berg</holder>
</copyright>
<legalnotice>
<para>
This documentation is free software; you can redistribute
it and/or modify it under the terms of the GNU General Public
License version 2 as published by the Free Software Foundation.
</para>
<para>
This documentation is distributed in the hope that it will be
useful, but WITHOUT ANY WARRANTY; without even the implied
warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
</para>
<para>
You should have received a copy of the GNU General Public
License along with this documentation; if not, write to the Free
Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
MA 02111-1307 USA
</para>
<para>
For more details see the file COPYING in the source
distribution of Linux.
</para>
</legalnotice>
<abstract>
!Pinclude/net/mac80211.h Introduction
!Pinclude/net/mac80211.h Warning
</abstract>
</bookinfo>
<toc></toc>
<!--
Generally, this document shall be ordered by increasing complexity.
It is important to note that readers should be able to read only
the first few sections to get a working driver and only advanced
usage should require reading the full document.
-->
<part>
<title>The basic mac80211 driver interface</title>
<partintro>
<para>
You should read and understand the information contained
within this part of the book while implementing a driver.
        In some chapters, advanced usage is noted; it may be
        skipped at first.
</para>
<para>
This part of the book only covers station and monitor mode
        functionality; additional information required to implement
the other modes is covered in the second part of the book.
</para>
</partintro>
<chapter id="basics">
<title>Basic hardware handling</title>
<para>TBD</para>
<para>
This chapter shall contain information on getting a hw
struct allocated and registered with mac80211.
</para>
<para>
Since it is required to allocate rates/modes before registering
a hw struct, this chapter shall also contain information on setting
up the rate/mode structs.
</para>
<para>
Additionally, some discussion about the callbacks and
the general programming model should be in here, including
the definition of ieee80211_ops which will be referred to
a lot.
</para>
<para>
Finally, a discussion of hardware capabilities should be done
with references to other parts of the book.
</para>
<!-- intentionally multiple !F lines to get proper order -->
!Finclude/net/mac80211.h ieee80211_hw
!Finclude/net/mac80211.h ieee80211_hw_flags
!Finclude/net/mac80211.h SET_IEEE80211_DEV
!Finclude/net/mac80211.h SET_IEEE80211_PERM_ADDR
!Finclude/net/mac80211.h ieee80211_ops
!Finclude/net/mac80211.h ieee80211_alloc_hw
!Finclude/net/mac80211.h ieee80211_register_hw
!Finclude/net/mac80211.h ieee80211_get_tx_led_name
!Finclude/net/mac80211.h ieee80211_get_rx_led_name
!Finclude/net/mac80211.h ieee80211_get_assoc_led_name
!Finclude/net/mac80211.h ieee80211_get_radio_led_name
!Finclude/net/mac80211.h ieee80211_unregister_hw
!Finclude/net/mac80211.h ieee80211_free_hw
</chapter>
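
The following sketch is not part of this commit; it is a minimal, hedged illustration of the alloc/register flow the chapter above outlines, using ieee80211_alloc_hw(), SET_IEEE80211_DEV(), SET_IEEE80211_PERM_ADDR() and ieee80211_register_hw(). The driver name "mywifi", its private struct and the (empty) ops table are hypothetical; a real driver must supply the callbacks and set up its bands/rates before registering.

#include <net/mac80211.h>

struct mywifi_priv {
	int vif_count;			/* used by the later sketches */
};

/* callbacks (.tx, .start, .stop, .config, .add_interface, ...) are
 * supplied by the real driver; left empty here for brevity */
static const struct ieee80211_ops mywifi_ops = {
};

static int mywifi_probe(struct device *dev, u8 *perm_addr)
{
	struct ieee80211_hw *hw;
	int err;

	/* allocate the hw struct plus a driver-private area */
	hw = ieee80211_alloc_hw(sizeof(struct mywifi_priv), &mywifi_ops);
	if (!hw)
		return -ENOMEM;

	SET_IEEE80211_DEV(hw, dev);		/* tie hw to the bus device */
	SET_IEEE80211_PERM_ADDR(hw, perm_addr);	/* MAC address from EEPROM */

	/* rates/modes must be set up here, before registration */

	err = ieee80211_register_hw(hw);
	if (err)
		ieee80211_free_hw(hw);
	return err;
}

static void mywifi_remove(struct ieee80211_hw *hw)
{
	ieee80211_unregister_hw(hw);
	ieee80211_free_hw(hw);
}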
<chapter id="phy-handling">
<title>PHY configuration</title>
<para>TBD</para>
<para>
This chapter should describe PHY handling including
start/stop callbacks and the various structures used.
</para>
!Finclude/net/mac80211.h ieee80211_conf
!Finclude/net/mac80211.h ieee80211_conf_flags
</chapter>
<chapter id="iface-handling">
<title>Virtual interfaces</title>
<para>TBD</para>
<para>
This chapter should describe virtual interface basics
that are relevant to the driver (VLANs, MGMT etc are not.)
It should explain the use of the add_iface/remove_iface
callbacks as well as the interface configuration callbacks.
</para>
<para>Things related to AP mode should be discussed there.</para>
<para>
Things related to supporting multiple interfaces should be
in the appropriate chapter, a BIG FAT note should be here about
this though and the recommendation to allow only a single
interface in STA mode at first!
</para>
!Finclude/net/mac80211.h ieee80211_if_types
!Finclude/net/mac80211.h ieee80211_if_init_conf
!Finclude/net/mac80211.h ieee80211_if_conf
</chapter>
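
Purely as an illustration of the "single STA-mode interface at first" recommendation above, here is a hedged sketch of add_interface/remove_interface callbacks. The mywifi names and the vif_count bookkeeping (in the hypothetical private struct from the previous sketch) are invented for this example, and the ieee80211_if_init_conf field names used (type, mac_addr) reflect this era of the API; include/net/mac80211.h is the authoritative reference.

#include <net/mac80211.h>

static int mywifi_add_interface(struct ieee80211_hw *hw,
				struct ieee80211_if_init_conf *conf)
{
	struct mywifi_priv *priv = hw->priv;

	if (conf->type != IEEE80211_IF_TYPE_STA)
		return -EOPNOTSUPP;	/* station mode only, for now */
	if (priv->vif_count)
		return -EBUSY;		/* one virtual interface at a time */

	priv->vif_count++;
	/* program conf->mac_addr into the hardware here */
	return 0;
}

static void mywifi_remove_interface(struct ieee80211_hw *hw,
				    struct ieee80211_if_init_conf *conf)
{
	struct mywifi_priv *priv = hw->priv;

	priv->vif_count--;
	/* quiesce the hardware for this interface here */
}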
<chapter id="rx-tx">
<title>Receive and transmit processing</title>
<sect1>
<title>what should be here</title>
<para>TBD</para>
<para>
This should describe the receive and transmit
paths in mac80211/the drivers as well as
transmit status handling.
</para>
</sect1>
<sect1>
<title>Frame format</title>
!Pinclude/net/mac80211.h Frame format
</sect1>
<sect1>
<title>Alignment issues</title>
<para>TBD</para>
</sect1>
<sect1>
<title>Calling into mac80211 from interrupts</title>
!Pinclude/net/mac80211.h Calling mac80211 from interrupts
</sect1>
<sect1>
<title>functions/definitions</title>
!Finclude/net/mac80211.h ieee80211_rx_status
!Finclude/net/mac80211.h mac80211_rx_flags
!Finclude/net/mac80211.h ieee80211_tx_control
!Finclude/net/mac80211.h ieee80211_tx_status_flags
!Finclude/net/mac80211.h ieee80211_rx
!Finclude/net/mac80211.h ieee80211_rx_irqsafe
!Finclude/net/mac80211.h ieee80211_tx_status
!Finclude/net/mac80211.h ieee80211_tx_status_irqsafe
!Finclude/net/mac80211.h ieee80211_rts_get
!Finclude/net/mac80211.h ieee80211_rts_duration
!Finclude/net/mac80211.h ieee80211_ctstoself_get
!Finclude/net/mac80211.h ieee80211_ctstoself_duration
!Finclude/net/mac80211.h ieee80211_generic_frame_duration
!Finclude/net/mac80211.h ieee80211_get_hdrlen_from_skb
!Finclude/net/mac80211.h ieee80211_get_hdrlen
!Finclude/net/mac80211.h ieee80211_wake_queue
!Finclude/net/mac80211.h ieee80211_stop_queue
!Finclude/net/mac80211.h ieee80211_start_queues
!Finclude/net/mac80211.h ieee80211_stop_queues
!Finclude/net/mac80211.h ieee80211_wake_queues
</sect1>
</chapter>
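
To make the receive-path description above slightly more concrete, here is a hedged sketch of a driver handing a received frame to mac80211 from its interrupt handler via ieee80211_rx_irqsafe(). The mywifi_rx_frame() helper is hypothetical, and the individual ieee80211_rx_status fields are deliberately left to a comment since they differ between kernel versions.

#include <linux/string.h>
#include <linux/skbuff.h>
#include <net/mac80211.h>

static void mywifi_rx_frame(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_rx_status status;

	memset(&status, 0, sizeof(status));
	/* fill in the receive status here: band/frequency, signal
	 * strength, rate index and any IEEE80211_RX_FLAG_* bits the
	 * hardware reported for this frame */

	/* safe from the interrupt handler: mac80211 queues the frame
	 * and processes it later in a tasklet */
	ieee80211_rx_irqsafe(hw, skb, &status);
}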
<chapter id="filters">
<title>Frame filtering</title>
!Pinclude/net/mac80211.h Frame filtering
!Finclude/net/mac80211.h ieee80211_filter_flags
</chapter>
</part>
<part id="advanced">
<title>Advanced driver interface</title>
<partintro>
<para>
Information contained within this part of the book is
of interest only for advanced interaction of mac80211
with drivers to exploit more hardware capabilities and
improve performance.
</para>
</partintro>
<chapter id="hardware-crypto-offload">
<title>Hardware crypto acceleration</title>
!Pinclude/net/mac80211.h Hardware crypto acceleration
<!-- intentionally multiple !F lines to get proper order -->
!Finclude/net/mac80211.h set_key_cmd
!Finclude/net/mac80211.h ieee80211_key_conf
!Finclude/net/mac80211.h ieee80211_key_alg
!Finclude/net/mac80211.h ieee80211_key_flags
</chapter>
<chapter id="qos">
<title>Multiple queues and QoS support</title>
<para>TBD</para>
!Finclude/net/mac80211.h ieee80211_tx_queue_params
!Finclude/net/mac80211.h ieee80211_tx_queue_stats_data
!Finclude/net/mac80211.h ieee80211_tx_queue
</chapter>
<chapter id="AP">
<title>Access point mode support</title>
<para>TBD</para>
<para>Some parts of the if_conf should be discussed here instead</para>
<para>
Insert notes about VLAN interfaces with hw crypto here or
in the hw crypto chapter.
</para>
!Finclude/net/mac80211.h ieee80211_get_buffered_bc
!Finclude/net/mac80211.h ieee80211_beacon_get
</chapter>
<chapter id="multi-iface">
<title>Supporting multiple virtual interfaces</title>
<para>TBD</para>
<para>
Note: WDS with identical MAC address should almost always be OK
</para>
<para>
Insert notes about having multiple virtual interfaces with
different MAC addresses here, note which configurations are
supported by mac80211, add notes about supporting hw crypto
with it.
</para>
</chapter>
<chapter id="hardware-scan-offload">
<title>Hardware scan offload</title>
<para>TBD</para>
!Finclude/net/mac80211.h ieee80211_scan_completed
</chapter>
</part>
<part id="rate-control">
<title>Rate control interface</title>
<partintro>
<para>TBD</para>
<para>
This part of the book describes the rate control algorithm
interface and how it relates to mac80211 and drivers.
</para>
</partintro>
<chapter id="dummy">
<title>dummy chapter</title>
<para>TBD</para>
</chapter>
</part>
<part id="internal">
<title>Internals</title>
<partintro>
<para>TBD</para>
<para>
This part of the book describes mac80211 internals.
</para>
</partintro>
<chapter id="key-handling">
<title>Key handling</title>
<sect1>
<title>Key handling basics</title>
!Pnet/mac80211/key.c Key handling basics
</sect1>
<sect1>
<title>MORE TBD</title>
<para>TBD</para>
</sect1>
</chapter>
<chapter id="rx-processing">
<title>Receive processing</title>
<para>TBD</para>
</chapter>
<chapter id="tx-processing">
<title>Transmit processing</title>
<para>TBD</para>
</chapter>
<chapter id="sta-info">
<title>Station info handling</title>
<sect1>
<title>Programming information</title>
!Fnet/mac80211/sta_info.h sta_info
!Fnet/mac80211/sta_info.h ieee80211_sta_info_flags
</sect1>
<sect1>
<title>STA information lifetime rules</title>
!Pnet/mac80211/sta_info.c STA information lifetime rules
</sect1>
</chapter>
<chapter id="synchronisation">
<title>Synchronisation</title>
<para>TBD</para>
<para>Locking, lots of RCU</para>
</chapter>
</part>
</book>

View file

@ -203,14 +203,6 @@ Who: linuxppc-dev@ozlabs.org
---------------------------
What: sk98lin network driver
When: February 2008
Why: In kernel tree version of driver is unmaintained. Sk98lin driver
replaced by the skge driver.
Who: Stephen Hemminger <shemminger@linux-foundation.org>
---------------------------
What: i386/x86_64 bzImage symlinks
When: April 2010
@ -221,8 +213,6 @@ Who: Thomas Gleixner <tglx@linutronix.de>
---------------------------
---------------------------
What: i2c-i810, i2c-prosavage and i2c-savage4
When: May 2008
Why: These drivers are superseded by i810fb, intelfb and savagefb.
@ -230,33 +220,6 @@ Who: Jean Delvare <khali@linux-fr.org>
---------------------------
What: bcm43xx wireless network driver
When: 2.6.26
Files: drivers/net/wireless/bcm43xx
Why: This driver's functionality has been replaced by the
mac80211-based b43 and b43legacy drivers.
Who: John W. Linville <linville@tuxdriver.com>
---------------------------
What: ieee80211 softmac wireless networking component
When: 2.6.26 (or after removal of bcm43xx and port of zd1211rw to mac80211)
Files: net/ieee80211/softmac
Why: No in-kernel drivers will depend on it any longer.
Who: John W. Linville <linville@tuxdriver.com>
---------------------------
What: rc80211-simple rate control algorithm for mac80211
When: 2.6.26
Files: net/mac80211/rc80211-simple.c
Why: This algorithm was provided for reference but always exhibited bad
responsiveness and performance and has some serious flaws. It has been
replaced by rc80211-pid.
Who: Stefano Brivio <stefano.brivio@polimi.it>
---------------------------
What (Why):
- include/linux/netfilter_ipv4/ipt_TOS.h ipt_tos.h header files
(superseded by xt_TOS/xt_tos target & match)

View file

@ -80,7 +80,7 @@ once you enable the radio, will depend on your hardware and driver combination.
e.g. With the BCM4318 on the Acer Aspire 5020 series:
ndiswrapper: Light blinks on when transmitting
bcm43xx/b43: Solid light, blinks off when transmitting
b43: Solid light, blinks off when transmitting
Wireless radio control is unconditionally enabled - all Acer laptops that support
acer-wmi come with built-in wireless. However, should you feel so inclined to

View file

@ -100,8 +100,6 @@ tuntap.txt
- TUN/TAP device driver, allowing user space Rx/Tx of packets.
vortex.txt
- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
wan-router.txt
- WAN router documentation
wavelan.txt
- AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
x25.txt

View file

@ -1,89 +0,0 @@
BCM43xx Linux Driver Project
============================
Introduction
------------
Many of the wireless devices found in modern notebook computers are
based on the wireless chips produced by Broadcom. These devices have
been a problem for Linux users as there is no open-source driver
available. In addition, Broadcom has not released specifications
for the device, and driver availability has been limited to the
binary-only form used in the GPL versions of AP hardware such as the
Linksys WRT54G, and the Windows and OS X drivers. Before this project
began, the only way to use these devices was to use the Windows or
OS X drivers with either the Linuxant or ndiswrapper modules. There
is a strong penalty if this method is used as loading the binary-only
module "taints" the kernel, and no kernel developer will help diagnose
any kernel problems.
Development
-----------
This driver has been developed using
a clean-room technique that is described at
http://bcm-specs.sipsolutions.net/ReverseEngineeringProcess. For legal
reasons, none of the clean-room crew works on the Linux driver,
and none of the Linux developers sees anything but the specifications,
which are the ultimate product of the reverse-engineering group.
Software
--------
Since the release of the 2.6.17 kernel, the bcm43xx driver has been
distributed with the kernel source, and is prebuilt in most, if not
all, distributions. There is, however, additional software that is
required. The firmware used by the chip is the intellectual property
of Broadcom and they have not given the bcm43xx team redistribution
rights to this firmware. Since we cannot legally redistribute
the firmware we cannot include it with the driver. Furthermore, it
cannot be placed in the downloadable archives of any distributing
organization; therefore, the user is responsible for obtaining the
firmware and placing it in the appropriate location so that the driver
can find it when initializing.
To help with this process, the bcm43xx developers provide a separate
program named bcm43xx-fwcutter to "cut" the firmware out of a
Windows or OS X driver and write the extracted files to the proper
location. This program is usually provided with the distribution;
however, it may be downloaded from
http://developer.berlios.de/project/showfiles.php?group_id=4547
The firmware is available in two versions. V3 firmware is used with
the in-kernel bcm43xx driver that uses a software MAC layer called
SoftMAC, and will have a microcode revision of 0x127 or smaller. The
V4 firmware is used by an out-of-kernel driver employing a variation of
the Devicescape MAC layer known as d80211. Once bcm43xx-d80211 reaches
a satisfactory level of development, it will replace bcm43xx-softmac
in the kernel as it is much more flexible and powerful.
A source for the latest V3 firmware is
http://downloads.openwrt.org/sources/wl_apsta-3.130.20.0.o
Once this file is downloaded, the command
'bcm43xx-fwcutter -w <dir> <filename>'
will extract the microcode and write it to directory
<dir>. The correct directory will depend on your distribution;
however, most use '/lib/firmware'. Once this step is completed,
the bcm43xx driver should load when the system is booted. To see
any messages relating to the driver, issue the command 'dmesg |
grep bcm43xx' from a terminal window. If there are any problems,
please send that output to Bcm43xx-dev@lists.berlios.de.
Although the driver has been in-kernel since 2.6.17, the earliest
version is quite limited in its capability. Patches that include
all features of later versions are available for the stable kernel
versions from 2.6.18. These will be needed if you use a BCM4318,
or a PCI Express version (BCM4311 and BCM4312). In addition, if you
have an early BCM4306 and more than 1 GB RAM, your kernel will need
to be patched. These patches, which are being updated regularly,
are available at ftp://lwfinger.dynalias.org/patches. Look for
combined_2.6.YY.patch. Of course you will need kernel source downloaded
from kernel.org, or the source from your distribution.
If you build your own kernel, please enable CONFIG_BCM43XX_DEBUG
and CONFIG_IEEE80211_SOFTMAC_DEBUG. The log information provided is
essential for solving any problems.

View file

@ -1,621 +0,0 @@
------------------------------------------------------------------------------
Linux WAN Router Utilities Package
------------------------------------------------------------------------------
Version 2.2.1
Mar 28, 2001
Author: Nenad Corbic <ncorbic@sangoma.com>
Copyright (c) 1995-2001 Sangoma Technologies Inc.
------------------------------------------------------------------------------
INTRODUCTION
Wide Area Networks (WANs) are used to interconnect Local Area Networks (LANs)
and/or stand-alone hosts over vast distances with data transfer rates
significantly higher than those achievable with commonly used dial-up
connections.
Usually an external device called `WAN router' sitting on your local network
or connected to your machine's serial port provides physical connection to
WAN. Although router's job may be as simple as taking your local network
traffic, converting it to WAN format and piping it through the WAN link, these
devices are notoriously expensive, with prices as much as 2 - 5 times higher
than the price of a typical PC box.
Alternatively, considering robustness and multitasking capabilities of Linux,
an internal router can be built (most routers use some sort of stripped down
Unix-like operating system anyway). With a number of relatively inexpensive WAN
interface cards available on the market, a perfectly usable router can be
built for less than half a price of an external router. Yet a Linux box
acting as a router can still be used for other purposes, such as fire-walling,
running FTP, WWW or DNS server, etc.
This kernel module introduces the notion of a WAN Link Driver (WLD) to Linux
operating system and provides generic hardware-independent services for such
drivers. Why can existing Linux network device interface not be used for
this purpose? Well, it can. However, there are a few key differences between
a typical network interface (e.g. Ethernet) and a WAN link.
Many WAN protocols, such as X.25 and frame relay, allow for multiple logical
connections (known as `virtual circuits' in X.25 terminology) over a single
physical link. Each such virtual circuit may (and almost always does) lead
to a different geographical location and, therefore, different network. As a
result, it is the virtual circuit, not the physical link, that represents a
route and, therefore, a network interface in Linux terms.
To further complicate things, virtual circuits are usually volatile in nature
(excluding so called `permanent' virtual circuits or PVCs). With almost no
time required to set up and tear down a virtual circuit, it is highly desirable
to implement on-demand connections in order to minimize network charges. So
unlike a typical network driver, the WAN driver must be able to handle multiple
network interfaces and cope as multiple virtual circuits come into existence
and go away dynamically.
Last, but not least, WAN configuration is much more complex than that of say
Ethernet and may well amount to several dozens of parameters. Some of them
are "link-wide" while others are virtual circuit-specific. The same holds
true for WAN statistics which is by far more extensive and extremely useful
when troubleshooting WAN connections. Extending the ifconfig utility to suit
these needs may be possible, but does not seem quite reasonable. Therefore, a
WAN configuration utility and corresponding application programmer's interface
is needed for this purpose.
Most of these problems are taken care of by this module. Its goal is to
provide a user with more-or-less standard look and feel for all WAN devices and
assist a WAN device driver writer by providing common services, such as:
o User-level interface via /proc file system
o Centralized configuration
o Device management (setup, shutdown, etc.)
o Network interface management (dynamic creation/destruction)
o Protocol encapsulation/decapsulation
To be able to use the Linux WAN Router you will also need a WAN Tools package
available from
ftp.sangoma.com/pub/linux/current_wanpipe/wanpipe-X.Y.Z.tgz
where vX.Y.Z represent the wanpipe version number.
For technical questions and/or comments please e-mail to ncorbic@sangoma.com.
For general inquiries please contact Sangoma Technologies Inc. by
Hotline: 1-800-388-2475 (USA and Canada, toll free)
Phone: (905) 474-1990 ext: 106
Fax: (905) 474-9223
E-mail: dm@sangoma.com (David Mandelstam)
WWW: http://www.sangoma.com
INSTALLATION
Please read the WanpipeForLinux.pdf manual on how to
install the WANPIPE tools and drivers properly.
After installing wanpipe package: /usr/local/wanrouter/doc.
On the ftp.sangoma.com : /linux/current_wanpipe/doc
COPYRIGHT AND LICENSING INFORMATION
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; either version 2, or (at your option) any later version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 675 Mass
Ave, Cambridge, MA 02139, USA.
ACKNOWLEDGEMENTS
This product is based on the WANPIPE(tm) Multiprotocol WAN Router developed
by Sangoma Technologies Inc. for Linux 2.0.x and 2.2.x. Success of the WANPIPE
together with the next major release of Linux kernel in summer 1996 commanded
adequate changes to the WANPIPE code to take full advantage of new Linux
features.
Instead of continuing developing proprietary interface tied to Sangoma WAN
cards, we decided to separate all hardware-independent code into a separate
module and defined two levels of interfaces - one for user-level applications
and another for kernel-level WAN drivers. WANPIPE is now implemented as a
WAN driver compliant with the WAN Link Driver interface. Also a general
purpose WAN configuration utility and a set of shell scripts was developed to
support WAN router at the user level.
Many useful ideas concerning hardware-independent interface implementation
were given by Mike McLagan <mike.mclagan@linux.org> and his implementation
of the Frame Relay router and drivers for Sangoma cards (dlci/sdla).
With the new implementation of the APIs being incorporated into the WANPIPE,
a special thank goes to Alan Cox in providing insight into BSD sockets.
Special thanks to all the WANPIPE users who performed field-testing, reported
bugs and made valuable comments and suggestions that help us to improve this
product.
NEW IN THIS RELEASE
o Updated the WANCFG utility
Calls the pppconfig to configure the PPPD
for async connections.
o Added the PPPCONFIG utility
Used to configure the PPPD daemon for the
WANPIPE Async PPP and standard serial port.
The wancfg calls the pppconfig to configure
the pppd.
o Fixed the PCI autodetect feature.
The SLOT 0 was used as an autodetect option
however, some high end PC's slot numbers start
from 0.
o This release has been tested with the new backupd
daemon release.
PRODUCT COMPONENTS AND RELATED FILES
/etc: (or user defined)
wanpipe1.conf default router configuration file
/lib/modules/X.Y.Z/misc:
wanrouter.o router kernel loadable module
af_wanpipe.o wanpipe api socket module
/lib/modules/X.Y.Z/net:
sdladrv.o Sangoma SDLA support module
wanpipe.o Sangoma WANPIPE(tm) driver module
/proc/net/wanrouter
Config reads current router configuration
Status reads current router status
{name} reads WAN driver statistics
/usr/sbin:
wanrouter wanrouter start-up script
wanconfig wanrouter configuration utility
sdladump WANPIPE adapter memory dump utility
fpipemon Monitor for Frame Relay
cpipemon Monitor for Cisco HDLC
ppipemon Monitor for PPP
xpipemon Monitor for X25
wpkbdmon WANPIPE keyboard led monitor/debugger
/usr/local/wanrouter:
README this file
COPYING GNU General Public License
Setup installation script
Filelist distribution definition file
wanrouter.rc meta-configuration file
(used by the Setup and wanrouter script)
/usr/local/wanrouter/doc:
wanpipeForLinux.pdf WAN Router User's Manual
/usr/local/wanrouter/patches:
wanrouter-v2213.gz patch for Linux kernels 2.2.11 up to 2.2.13.
wanrouter-v2214.gz patch for Linux kernel 2.2.14.
wanrouter-v2215.gz patch for Linux kernels 2.2.15 to 2.2.17.
wanrouter-v2218.gz patch for Linux kernels 2.2.18 and up.
wanrouter-v240.gz patch for Linux kernel 2.4.0.
wanrouter-v242.gz patch for Linux kernel 2.4.2 and up.
wanrouter-v2034.gz patch for Linux kernel 2.0.34
wanrouter-v2036.gz patch for Linux kernel 2.0.36 and up.
/usr/local/wanrouter/patches/kdrivers:
Sources of the latest WANPIPE device drivers.
These are used to UPGRADE the linux kernel to the newest
version if the kernel source has already been patched with
WANPIPE drivers.
/usr/local/wanrouter/samples:
interface sample interface configuration file
wanpipe1.cpri CHDLC primary port
wanpipe2.csec CHDLC secondary port
wanpipe1.fr Frame Relay protocol
wanpipe1.ppp PPP protocol
wanpipe1.asy CHDLC ASYNC protocol
wanpipe1.x25 X25 protocol
wanpipe1.stty Sync TTY driver (Used by Kernel PPPD daemon)
wanpipe1.atty Async TTY driver (Used by Kernel PPPD daemon)
wanrouter.rc sample meta-configuration file
/usr/local/wanrouter/util:
* wan-tools utilities source code
/usr/local/wanrouter/api/x25:
* x25 api sample programs.
/usr/local/wanrouter/api/chdlc:
* chdlc api sample programs.
/usr/local/wanrouter/api/fr:
* fr api sample programs.
/usr/local/wanrouter/config/wancfg:
wancfg WANPIPE GUI configuration program.
Creates wanpipe#.conf files.
/usr/local/wanrouter/config/cfgft1:
cfgft1 GUI CSU/DSU configuration program.
/usr/include/linux:
wanrouter.h router API definitions
wanpipe.h WANPIPE API definitions
sdladrv.h SDLA support module API definitions
sdlasfm.h SDLA firmware module definitions
if_wanpipe.h WANPIPE Socket definitions
sdlapci.h WANPIPE PCI definitions
/usr/src/linux/net/wanrouter:
* wanrouter source code
/var/log:
wanrouter wanrouter start-up log (created by the Setup script)
/var/lock: (or /var/lock/subsys for RedHat)
wanrouter wanrouter lock file (created by the Setup script)
/usr/local/wanrouter/firmware:
fr514.sfm Frame relay firmware for Sangoma S508/S514 card
cdual514.sfm Dual Port Cisco HDLC firmware for Sangoma S508/S514 card
ppp514.sfm PPP Firmware for Sangoma S508 and S514 cards
x25_508.sfm X25 Firmware for Sangoma S508 card.
REVISION HISTORY
1.0.0 December 31, 1996 Initial version
1.0.1 January 30, 1997 Status and statistics can be read via /proc
filesystem entries.
1.0.2 April 30, 1997 Added UDP management via monitors.
1.0.3 June 3, 1997 UDP management for multiple boards using Frame
Relay and PPP
Enabled continuous transmission of Configure
Request Packet for PPP (for 508 only)
Connection Timeout for PPP changed from 900 to 0
Flow Control Problem fixed for Frame Relay
1.0.4 July 10, 1997 S508/FT1 monitoring capability in fpipemon and
ppipemon utilities.
Configurable TTL for UDP packets.
Multicast and Broadcast IP source addresses are
silently discarded.
1.0.5 July 28, 1997 Configurable T391,T392,N391,N392,N393 for Frame
Relay in router.conf.
Configurable Memory Address through router.conf
for Frame Relay, PPP and X.25. (commenting this
out enables auto-detection).
Fixed freeing up received buffers using kfree()
for Frame Relay and X.25.
Protect sdla_peek() by calling save_flags(),
cli() and restore_flags().
Changed number of Trace elements from 32 to 20
Added DLCI specific data monitoring in FPIPEMON.
2.0.0 Nov 07, 1997 Implemented protection of RACE conditions by
critical flags for FRAME RELAY and PPP.
DLCI List interrupt mode implemented.
IPX support in FRAME RELAY and PPP.
IPX Server Support (MARS)
More driver specific stats included in FPIPEMON
and PIPEMON.
2.0.1 Nov 28, 1997 Bug Fixes for version 2.0.0.
Protection of "enable_irq()" while
"disable_irq()" has been enabled from any other
routine (for Frame Relay, PPP and X25).
Added additional Stats for Fpipemon and Ppipemon
Improved Load Sharing for multiple boards
2.0.2 Dec 09, 1997 Support for PAP and CHAP for ppp has been
implemented.
2.0.3 Aug 15, 1998 New release supporting Cisco HDLC, CIR for Frame
relay, Dynamic IP assignment for PPP and Inverse
Arp support for Frame-relay. Man Pages are
included for better support and a new utility
for configuring FT1 cards.
2.0.4 Dec 09, 1998 Dual Port support for Cisco HDLC.
Support for HDLC (LAPB) API.
Supports BiSync Streaming code for S502E
and S503 cards.
Support for Streaming HDLC API.
Provides a BSD socket interface for
creating applications using BiSync
streaming.
2.0.5 Aug 04, 1999 CHDLC initialization bug fix.
PPP interrupt driven driver:
Fix to the PPP line hangup problem.
New PPP firmware
Added comments to the startup SYSTEM ERROR messages
Xpipemon debugging application for the X25 protocol
New USER_MANUAL.txt
Fixed the odd boundary 4byte writes to the board.
BiSync Streaming code has been taken out.
Available as a patch.
Streaming HDLC API has been taken out.
Available as a patch.
2.0.6 Aug 17, 1999 Increased debugging in startup scripts
Fixed installation bugs from 2.0.5
Kernel patch works for both 2.2.10 and 2.2.11 kernels.
There is no functional difference between the two packages
2.0.7 Aug 26, 1999 o Merged X25API code into WANPIPE.
o Fixed a memory leak for X25API
o Updated the X25API code for 2.2.X kernels.
o Improved NEM handling.
2.1.0 Oct 25, 1999 o New code for S514 PCI Card
o New CHDLC and Frame Relay drivers
o PPP and X25 are not supported in this release
2.1.1 Nov 30, 1999 o PPP support for S514 PCI Cards
2.1.3 Apr 06, 2000 o Socket based x25api
o Socket based chdlc api
o Socket based fr api
o Dual Port Receive only CHDLC support.
o Asynchronous CHDLC support (Secondary Port)
o cfgft1 GUI csu/dsu configurator
o wancfg GUI configuration file
configurator.
o Architectural directory changes.
beta-2.1.4 Jul 2000 o Dynamic interface configuration:
Network interfaces reflect the state
of protocol layer. If the protocol becomes
disconnected, driver will bring down
the interface. Once the protocol reconnects
the interface will be brought up.
Note: This option is turned off by default.
o Dynamic wanrouter setup using 'wanconfig':
wanconfig utility can be used to
shutdown,restart,start or reconfigure
a virtual circuit dynamically.
Frame Relay: Each DLCI can be:
created,stopped,restarted and reconfigured
dynamically using wanconfig.
ex: wanconfig card wanpipe1 dev wp1_fr16 up
o Wanrouter startup via command line arguments:
wanconfig also supports wanrouter startup via command line
arguments. Thus, there is no need to create a wanpipe#.conf
configuration file.
o Socket based x25api update/bug fixes.
Added support for LCN numbers greater than 255.
Option to pass up modem messages.
Provided a PCI IRQ check, so a single S514
card is guaranteed to have a non-sharing interrupt.
o Fixes to the wancfg utility.
o New FT1 debugging support via *pipemon utilities.
o Frame Relay ARP support Enabled.
beta3-2.1.4 Jul 2000 o X25 M_BIT Problem fix.
o Added the Multi-Port PPP
Updated utilities for the Multi-Port PPP.
2.1.4 Aug 2000
o In X25API:
Maximum packet an application can send
to the driver has been extended to 4096 bytes.
Fixed the x25 startup bug. Enable
communications only after all interfaces
come up. HIGH SVC/PVC is used to calculate
the number of channels.
Enable protocol only after all interfaces
are enabled.
o Added an extra state to the FT1 config, kernel module.
o Updated the pipemon debuggers.
o Blocked the Multi-Port PPP from running on kernels
2.2.16 or greater, due to syncppp kernel module
change.
beta1-2.1.5 Nov 15 2000
o Fixed the MultiPort PPP Support for kernels 2.2.16 and above.
2.2.X kernels only
o Secured the driver UDP debugging calls
- All illegal network debugging calls are reported to
the log.
- Defined a set of allowed commands, all other denied.
o Cpipemon
- Added set FT1 commands to the cpipemon. Thus CSU/DSU
configuration can be performed using cpipemon.
All systems that cannot run cfgft1 GUI utility should
use cpipemon to configure the on board CSU/DSU.
o Keyboard Led Monitor/Debugger
- A new utility /usr/sbin/wpkbdmon uses keyboard leds
to convey operational statistic information of the
Sangoma WANPIPE cards.
NUM_LOCK = Line State (On=connected, Off=disconnected)
CAPS_LOCK = Tx data (On=transmitting, Off=no tx data)
SCROLL_LOCK = Rx data (On=receiving, Off=no rx data)
o Hardware probe on module load and dynamic device allocation
- During WANPIPE module load, all Sangoma cards are probed
and found information is printed in the /var/log/messages.
- If no cards are found, the module load fails.
- Appropriate number of devices are dynamically loaded
based on the number of Sangoma cards found.
Note: The kernel configuration option
CONFIG_WANPIPE_CARDS has been taken out.
o Fixed the Frame Relay and Chdlc network interfaces so they are
compatible with libpcap libraries. Meaning, tcpdump, snort,
ethereal, and all other packet sniffers and debuggers work on
all WANPIPE network interfaces.
- Set the network interface encoding type to ARPHRD_PPP.
This tells the sniffers that data obtained from the
network interface is in pure IP format.
Fix for 2.2.X kernels only.
o True interface encoding option for Frame Relay and CHDLC
- The above fix sets the network interface encoding
type to ARPHRD_PPP, however some customers use
the encoding interface type to determine the
protocol running. Therefore, the TRUE ENCODING
option will set the interface type back to the
original value.
NOTE: If this option is used with Frame Relay and CHDLC
libpcap library support will be broken.
i.e. tcpdump will not work.
Fix for 2.2.x Kernels only.
o Ethernet Bridging over Frame Relay
- The Frame Relay bridging has been developed by
Kristian Hoffmann and Mark Wells.
- The Linux kernel bridge is used to send ethernet
data over the frame relay links.
For 2.2.X Kernels only.
o Added extensive 2.0.X support. Most new features of
2.1.5 for protocols Frame Relay, PPP and CHDLC are
supported under 2.0.X kernels.
beta1-2.2.0 Dec 30 2000
o Updated drivers for 2.4.X kernels.
o Updated drivers for SMP support.
o X25API is now able to share PCI interrupts.
o Took out a general polling routine that was used
only by X25API.
o Added appropriate locks to the dynamic reconfiguration
code.
o Fixed a bug in the keyboard debug monitor.
beta2-2.2.0 Jan 8 2001
o Patches for 2.4.0 kernel
o Patches for 2.2.18 kernel
o Minor updates to PPP and CHLDC drivers.
Note: No functional difference.
beta3-2.2.9 Jan 10 2001
o I missed the 2.2.18 kernel patches in beta2-2.2.0
release. They are included in this release.
Stable Release
2.2.0 Feb 01 2001
o Bug fix in wancfg GUI configurator.
The edit function didn't work properly.
bata1-2.2.1 Feb 09 2001
o WANPIPE TTY Driver emulation.
Two modes of operation Sync and Async.
Sync: Using the PPPD daemon, kernel SyncPPP layer
and the Wanpipe sync TTY driver: a PPP protocol
connection can be established via Sangoma adapter, over
a T1 leased line.
The 2.4.0 kernel PPP layer supports MULTILINK
protocol, that can be used to bundle any number of Sangoma
adapters (T1 lines) into one, under a single IP address.
Thus, efficiently obtaining multiple T1 throughput.
NOTE: The remote side must also implement MULTILINK PPP
protocol.
Async:Using the PPPD daemon, kernel AsyncPPP layer
and the WANPIPE async TTY driver: a PPP protocol
connection can be established via Sangoma adapter and
a modem, over a telephone line.
Thus, the WANPIPE async TTY driver simulates a serial
TTY driver that would normally be used to interface the
MODEM to the linux kernel.
o WANPIPE PPP Backup Utility
This utility will monitor the state of the PPP T1 line.
In case of failure, a dial up connection will be established
via pppd daemon, ether via a serial tty driver (serial port),
or a WANPIPE async TTY driver (in case serial port is unavailable).
Furthermore, while in dial up mode, the primary PPP T1 link
will be monitored for signs of life.
If the PPP T1 link comes back to life, the dial up connection
will be shutdown and T1 line re-established.
o New Setup installation script.
Option to UPGRADE device drivers if the kernel source has
already been patched with WANPIPE.
Option to COMPILE WANPIPE modules against the currently
running kernel, thus no need for manual kernel and module
re-compilation.
o Updates and Bug Fixes to wancfg utility.
bata2-2.2.1 Feb 20 2001
o Bug fixes to the CHDLC device drivers.
The driver had compilation problems under kernels
2.2.14 or lower.
o Bug fixes to the Setup installation script.
The device drivers compilation options didn't work
properly.
o Update to the wpbackupd daemon.
Optimized the cross-over times, between the primary
link and the backup dialup.
beta3-2.2.1 Mar 02 2001
o Patches for 2.4.2 kernel.
o Bug fixes to util/ make files.
o Bug fixes to the Setup installation script.
o Took out the backupd support and made it into
a separate package.
beta4-2.2.1 Mar 12 2001
o Fix to the Frame Relay Device driver.
IPSAC sends a packet of zero length
header to the frame relay driver. The
driver tries to push its own 2 byte header
into the packet, which causes the driver to
crash.
o Fix the WANPIPE re-configuration code.
Bug was found by trying to run the cfgft1 while the
interface was already running.
o Updates to cfgft1.
Writes a wanpipe#.cfgft1 configuration file
once the CSU/DSU is configured. This file
holds the current CSU/DSU configuration.
>>>>>> END OF README <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<

View file

@ -840,15 +840,6 @@ L: linux-wireless@vger.kernel.org
W: http://linuxwireless.org/en/users/Drivers/b43
S: Maintained
BCM43XX WIRELESS DRIVER (SOFTMAC BASED VERSION)
P: Larry Finger
M: Larry.Finger@lwfinger.net
P: Stefano Brivio
M: stefano.brivio@polimi.it
L: linux-wireless@vger.kernel.org
W: http://bcm43xx.berlios.de/
S: Obsolete
BEFS FILE SYSTEM
P: Sergey S. Kostyliov
M: rathamahata@php4.ru
@ -3479,7 +3470,7 @@ P: Vlad Yasevich
M: vladislav.yasevich@hp.com
P: Sridhar Samudrala
M: sri@us.ibm.com
L: lksctp-developers@lists.sourceforge.net
L: linux-sctp@vger.kernel.org
W: http://lksctp.sourceforge.net
S: Supported
@ -3613,12 +3604,6 @@ M: mhoffman@lightlink.com
L: lm-sensors@lm-sensors.org
S: Maintained
SOFTMAC LAYER (IEEE 802.11)
P: Daniel Drake
M: dsd@gentoo.org
L: linux-wireless@vger.kernel.org
S: Obsolete
SOFTWARE RAID (Multiple Disks) SUPPORT
P: Ingo Molnar
M: mingo@redhat.com

View file

@ -294,7 +294,7 @@ simeth_device_event(struct notifier_block *this,unsigned long event, void *ptr)
return NOTIFY_DONE;
}
if (dev->nd_net != &init_net)
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
if ( event != NETDEV_UP && event != NETDEV_DOWN ) return NOTIFY_DONE;

View file

@ -138,7 +138,7 @@ static int __devinit ep8248e_mdio_probe(struct of_device *ofdev,
bus->name = "ep8248e-mdio-bitbang";
bus->dev = &ofdev->dev;
bus->id = res.start;
snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
return mdiobus_register(bus);
}

View file

@ -241,7 +241,7 @@ static int __devinit gpio_mdio_probe(struct of_device *ofdev,
new_bus->reset = &gpio_mdio_reset;
prop = of_get_property(np, "reg", NULL);
new_bus->id = *prop;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", *prop);
new_bus->priv = priv;
new_bus->phy_mask = 0;

View file

@ -341,7 +341,7 @@ static int __init gfar_of_init(void)
goto unreg;
}
gfar_data.bus_id = 0;
snprintf(gfar_data.bus_id, MII_BUS_ID_SIZE, "0");
gfar_data.phy_id = fixed_link[0];
} else {
phy = of_find_node_by_phandle(*ph);
@ -362,7 +362,8 @@ static int __init gfar_of_init(void)
}
gfar_data.phy_id = *id;
gfar_data.bus_id = res.start;
snprintf(gfar_data.bus_id, MII_BUS_ID_SIZE, "%x",
res.start);
of_node_put(phy);
of_node_put(mdio);

View file

@ -538,11 +538,9 @@ CONFIG_CTC=m
# CONFIG_SMSGIUCV is not set
# CONFIG_CLAW is not set
CONFIG_QETH=y
#
# Gigabit Ethernet default settings
#
# CONFIG_QETH_IPV6 is not set
CONFIG_QETH_L2=y
CONFIG_QETH_L3=y
CONFIG_QETH_IPV6=y
CONFIG_CCWGROUP=y
# CONFIG_PPP is not set
# CONFIG_SLIP is not set

View file

@ -437,7 +437,7 @@ static inline void dump_skb (char * prefix, unsigned int vc, struct sk_buff * sk
/* see limitations under Hardware Features */
static inline int check_area (void * start, size_t length) {
static int check_area (void * start, size_t length) {
// assumes length > 0
const u32 fourmegmask = -1 << 22;
const u32 twofivesixmask = -1 << 8;
@ -456,7 +456,7 @@ static inline int check_area (void * start, size_t length) {
/********** free an skb (as per ATM device driver documentation) **********/
static inline void amb_kfree_skb (struct sk_buff * skb) {
static void amb_kfree_skb (struct sk_buff * skb) {
if (ATM_SKB(skb)->vcc->pop) {
ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
} else {
@ -466,7 +466,7 @@ static inline void amb_kfree_skb (struct sk_buff * skb) {
/********** TX completion **********/
static inline void tx_complete (amb_dev * dev, tx_out * tx) {
static void tx_complete (amb_dev * dev, tx_out * tx) {
tx_simple * tx_descr = bus_to_virt (tx->handle);
struct sk_buff * skb = tx_descr->skb;
@ -643,7 +643,7 @@ static int command_do (amb_dev * dev, command * cmd) {
/********** TX queue pair **********/
static inline int tx_give (amb_dev * dev, tx_in * tx) {
static int tx_give (amb_dev * dev, tx_in * tx) {
amb_txq * txq = &dev->txq;
unsigned long flags;
@ -675,7 +675,7 @@ static inline int tx_give (amb_dev * dev, tx_in * tx) {
}
}
static inline int tx_take (amb_dev * dev) {
static int tx_take (amb_dev * dev) {
amb_txq * txq = &dev->txq;
unsigned long flags;
@ -703,7 +703,7 @@ static inline int tx_take (amb_dev * dev) {
/********** RX queue pairs **********/
static inline int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
static int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
amb_rxq * rxq = &dev->rxq[pool];
unsigned long flags;
@ -728,7 +728,7 @@ static inline int rx_give (amb_dev * dev, rx_in * rx, unsigned char pool) {
}
}
static inline int rx_take (amb_dev * dev, unsigned char pool) {
static int rx_take (amb_dev * dev, unsigned char pool) {
amb_rxq * rxq = &dev->rxq[pool];
unsigned long flags;
@ -761,7 +761,7 @@ static inline int rx_take (amb_dev * dev, unsigned char pool) {
/********** RX Pool handling **********/
/* pre: buffers_wanted = 0, post: pending = 0 */
static inline void drain_rx_pool (amb_dev * dev, unsigned char pool) {
static void drain_rx_pool (amb_dev * dev, unsigned char pool) {
amb_rxq * rxq = &dev->rxq[pool];
PRINTD (DBG_FLOW|DBG_POOL, "drain_rx_pool %p %hu", dev, pool);
@ -796,7 +796,7 @@ static void drain_rx_pools (amb_dev * dev) {
drain_rx_pool (dev, pool);
}
static inline void fill_rx_pool (amb_dev * dev, unsigned char pool,
static void fill_rx_pool (amb_dev * dev, unsigned char pool,
gfp_t priority)
{
rx_in rx;
@ -846,7 +846,7 @@ static void fill_rx_pools (amb_dev * dev) {
/********** enable host interrupts **********/
static inline void interrupts_on (amb_dev * dev) {
static void interrupts_on (amb_dev * dev) {
wr_plain (dev, offsetof(amb_mem, interrupt_control),
rd_plain (dev, offsetof(amb_mem, interrupt_control))
| AMB_INTERRUPT_BITS);
@ -854,7 +854,7 @@ static inline void interrupts_on (amb_dev * dev) {
/********** disable host interrupts **********/
static inline void interrupts_off (amb_dev * dev) {
static void interrupts_off (amb_dev * dev) {
wr_plain (dev, offsetof(amb_mem, interrupt_control),
rd_plain (dev, offsetof(amb_mem, interrupt_control))
&~ AMB_INTERRUPT_BITS);

View file

@ -424,7 +424,7 @@ static inline void FLUSH_RX_CHANNEL (hrz_dev * dev, u16 channel) {
return;
}
static inline void WAIT_FLUSH_RX_COMPLETE (hrz_dev * dev) {
static void WAIT_FLUSH_RX_COMPLETE (hrz_dev * dev) {
while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & FLUSH_CHANNEL)
;
return;
@ -435,7 +435,7 @@ static inline void SELECT_RX_CHANNEL (hrz_dev * dev, u16 channel) {
return;
}
static inline void WAIT_UPDATE_COMPLETE (hrz_dev * dev) {
static void WAIT_UPDATE_COMPLETE (hrz_dev * dev) {
while (rd_regw (dev, RX_CHANNEL_PORT_OFF) & RX_CHANNEL_UPDATE_IN_PROGRESS)
;
return;
@ -796,7 +796,7 @@ static void hrz_change_vc_qos (ATM_RXER * rxer, MAAL_QOS * qos) {
/********** free an skb (as per ATM device driver documentation) **********/
static inline void hrz_kfree_skb (struct sk_buff * skb) {
static void hrz_kfree_skb (struct sk_buff * skb) {
if (ATM_SKB(skb)->vcc->pop) {
ATM_SKB(skb)->vcc->pop (ATM_SKB(skb)->vcc, skb);
} else {
@ -1076,7 +1076,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
/********** handle RX bus master complete events **********/
static inline void rx_bus_master_complete_handler (hrz_dev * dev) {
static void rx_bus_master_complete_handler (hrz_dev * dev) {
if (test_bit (rx_busy, &dev->flags)) {
rx_schedule (dev, 1);
} else {
@ -1089,7 +1089,7 @@ static inline void rx_bus_master_complete_handler (hrz_dev * dev) {
/********** (queue to) become the next TX thread **********/
static inline int tx_hold (hrz_dev * dev) {
static int tx_hold (hrz_dev * dev) {
PRINTD (DBG_TX, "sleeping at tx lock %p %lu", dev, dev->flags);
wait_event_interruptible(dev->tx_queue, (!test_and_set_bit(tx_busy, &dev->flags)));
PRINTD (DBG_TX, "woken at tx lock %p %lu", dev, dev->flags);
@ -1232,7 +1232,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
/********** handle TX bus master complete events **********/
static inline void tx_bus_master_complete_handler (hrz_dev * dev) {
static void tx_bus_master_complete_handler (hrz_dev * dev) {
if (test_bit (tx_busy, &dev->flags)) {
tx_schedule (dev, 1);
} else {
@ -1246,7 +1246,7 @@ static inline void tx_bus_master_complete_handler (hrz_dev * dev) {
/********** move RX Q pointer to next item in circular buffer **********/
// called only from IRQ sub-handler
static inline u32 rx_queue_entry_next (hrz_dev * dev) {
static u32 rx_queue_entry_next (hrz_dev * dev) {
u32 rx_queue_entry;
spin_lock (&dev->mem_lock);
rx_queue_entry = rd_mem (dev, &dev->rx_q_entry->entry);
@ -1270,7 +1270,7 @@ static inline void rx_disabled_handler (hrz_dev * dev) {
/********** handle RX data received by device **********/
// called from IRQ handler
static inline void rx_data_av_handler (hrz_dev * dev) {
static void rx_data_av_handler (hrz_dev * dev) {
u32 rx_queue_entry;
u32 rx_queue_entry_flags;
u16 rx_len;
@ -1394,7 +1394,7 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
irq_ok = 0;
while ((int_source = rd_regl (dev, INT_SOURCE_REG_OFF)
& INTERESTING_INTERRUPTS)) {
// In the interests of fairness, the (inline) handlers below are
// In the interests of fairness, the handlers below are
// called in sequence and without immediate return to the head of
// the while loop. This is only of issue for slow hosts (or when
// debugging messages are on). Really slow hosts may find a fast
@ -1458,7 +1458,7 @@ static void do_housekeeping (unsigned long arg) {
/********** find an idle channel for TX and set it up **********/
// called with tx_busy set
static inline short setup_idle_tx_channel (hrz_dev * dev, hrz_vcc * vcc) {
static short setup_idle_tx_channel (hrz_dev * dev, hrz_vcc * vcc) {
unsigned short idle_channels;
short tx_channel = -1;
unsigned int spin_count;
@ -1777,13 +1777,13 @@ static void hrz_reset (const hrz_dev * dev) {
/********** read the burnt in address **********/
static inline void WRITE_IT_WAIT (const hrz_dev *dev, u32 ctrl)
static void WRITE_IT_WAIT (const hrz_dev *dev, u32 ctrl)
{
wr_regl (dev, CONTROL_0_REG, ctrl);
udelay (5);
}
static inline void CLOCK_IT (const hrz_dev *dev, u32 ctrl)
static void CLOCK_IT (const hrz_dev *dev, u32 ctrl)
{
// DI must be valid around rising SK edge
WRITE_IT_WAIT(dev, ctrl & ~SEEPROM_SK);

View file

@ -115,7 +115,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt,
struct aoe_hdr *h;
u32 n;
if (ifp->nd_net != &init_net)
if (dev_net(ifp) != &init_net)
goto exit;
skb = skb_share_check(skb, GFP_ATOMIC);

The diff for this file is not shown because of its large size.

View file

@ -966,8 +966,8 @@ static int __devinit rtl8139_init_one (struct pci_dev *pdev,
addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
for (i = 0; i < 3; i++)
((u16 *) (dev->dev_addr))[i] =
le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
((__le16 *) (dev->dev_addr))[i] =
cpu_to_le16(read_eeprom (ioaddr, i + 7, addr_len));
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
/* The Rtl8139-specific entries in the device structure. */
@ -1373,8 +1373,8 @@ static void rtl8139_hw_start (struct net_device *dev)
/* unlock Config[01234] and BMCR register writes */
RTL_W8_F (Cfg9346, Cfg9346_Unlock);
/* Restore our idea of the MAC address. */
RTL_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (dev->dev_addr + 0)));
RTL_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (dev->dev_addr + 4)));
RTL_W32_F (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
RTL_W32_F (MAC0 + 4, le16_to_cpu (*(__le16 *) (dev->dev_addr + 4)));
/* Must enable Tx/Rx before setting transfer thresholds! */
RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
@ -1945,7 +1945,7 @@ static int rtl8139_rx(struct net_device *dev, struct rtl8139_private *tp,
rmb();
/* read size+status of next frame from DMA ring buffer */
rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset));
rx_status = le32_to_cpu (*(__le32 *) (rx_ring + ring_offset));
rx_size = rx_status >> 16;
pkt_size = rx_size - 4;

View file

@ -48,14 +48,16 @@ EXPORT_SYMBOL(__alloc_ei_netdev);
#if defined(MODULE)
int init_module(void)
static int __init ns8390_module_init(void)
{
return 0;
}
void cleanup_module(void)
static void __exit ns8390_module_exit(void)
{
}
module_init(ns8390_module_init);
module_exit(ns8390_module_exit);
#endif /* MODULE */
MODULE_LICENSE("GPL");

View file

@ -467,6 +467,13 @@ config SNI_82596
Say Y here to support the on-board Intel 82596 ethernet controller
built into SNI RM machines.
config KORINA
tristate "Korina (IDT RC32434) Ethernet support"
depends on NET_ETHERNET && MIKROTIK_RB500
help
If you have a Mikrotik RouterBoard 500 or IDT RC32434
based system say Y. Otherwise say N.
config MIPS_JAZZ_SONIC
tristate "MIPS JAZZ onboard SONIC Ethernet support"
depends on MACH_JAZZ
@ -1431,7 +1438,7 @@ config CS89x0
config TC35815
tristate "TOSHIBA TC35815 Ethernet support"
depends on NET_PCI && PCI && MIPS
select MII
select PHYLIB
config EEPRO100
tristate "EtherExpressPro/100 support (eepro100, original Becker driver)"
@ -2220,93 +2227,6 @@ config SKY2_DEBUG
If unsure, say N.
config SK98LIN
tristate "Marvell Yukon Chipset / SysKonnect SK-98xx Support (DEPRECATED)"
depends on PCI
---help---
Say Y here if you have a Marvell Yukon or SysKonnect SK-98xx/SK-95xx
compliant Gigabit Ethernet Adapter.
This driver supports the original Yukon chipset. This driver is
deprecated and will be removed from the kernel in the near future,
it has been replaced by the skge driver. skge is cleaner and
seems to work better.
This driver does not support the newer Yukon2 chipset. A separate
driver, sky2, is provided to support Yukon2-based adapters.
The following adapters are supported by this driver:
- 3Com 3C940 Gigabit LOM Ethernet Adapter
- 3Com 3C941 Gigabit LOM Ethernet Adapter
- Allied Telesyn AT-2970LX Gigabit Ethernet Adapter
- Allied Telesyn AT-2970LX/2SC Gigabit Ethernet Adapter
- Allied Telesyn AT-2970SX Gigabit Ethernet Adapter
- Allied Telesyn AT-2970SX/2SC Gigabit Ethernet Adapter
- Allied Telesyn AT-2970TX Gigabit Ethernet Adapter
- Allied Telesyn AT-2970TX/2TX Gigabit Ethernet Adapter
- Allied Telesyn AT-2971SX Gigabit Ethernet Adapter
- Allied Telesyn AT-2971T Gigabit Ethernet Adapter
- Belkin Gigabit Desktop Card 10/100/1000Base-T Adapter, Copper RJ-45
- EG1032 v2 Instant Gigabit Network Adapter
- EG1064 v2 Instant Gigabit Network Adapter
- Marvell 88E8001 Gigabit LOM Ethernet Adapter (Abit)
- Marvell 88E8001 Gigabit LOM Ethernet Adapter (Albatron)
- Marvell 88E8001 Gigabit LOM Ethernet Adapter (Asus)
- Marvell 88E8001 Gigabit LOM Ethernet Adapter (ECS)
- Marvell 88E8001 Gigabit LOM Ethernet Adapter (Epox)
- Marvell 88E8001 Gigabit LOM Ethernet Adapter (Foxconn)
- Marvell 88E8001 Gigabit LOM Ethernet Adapter (Gigabyte)
- Marvell 88E8001 Gigabit LOM Ethernet Adapter (Iwill)
- Marvell 88E8050 Gigabit LOM Ethernet Adapter (Intel)
- Marvell RDK-8001 Adapter
- Marvell RDK-8002 Adapter
- Marvell RDK-8003 Adapter
- Marvell RDK-8004 Adapter
- Marvell RDK-8006 Adapter
- Marvell RDK-8007 Adapter
- Marvell RDK-8008 Adapter
- Marvell RDK-8009 Adapter
- Marvell RDK-8010 Adapter
- Marvell RDK-8011 Adapter
- Marvell RDK-8012 Adapter
- Marvell RDK-8052 Adapter
- Marvell Yukon Gigabit Ethernet 10/100/1000Base-T Adapter (32 bit)
- Marvell Yukon Gigabit Ethernet 10/100/1000Base-T Adapter (64 bit)
- N-Way PCI-Bus Giga-Card 1000/100/10Mbps(L)
- SK-9521 10/100/1000Base-T Adapter
- SK-9521 V2.0 10/100/1000Base-T Adapter
- SK-9821 Gigabit Ethernet Server Adapter (SK-NET GE-T)
- SK-9821 V2.0 Gigabit Ethernet 10/100/1000Base-T Adapter
- SK-9822 Gigabit Ethernet Server Adapter (SK-NET GE-T dual link)
- SK-9841 Gigabit Ethernet Server Adapter (SK-NET GE-LX)
- SK-9841 V2.0 Gigabit Ethernet 1000Base-LX Adapter
- SK-9842 Gigabit Ethernet Server Adapter (SK-NET GE-LX dual link)
- SK-9843 Gigabit Ethernet Server Adapter (SK-NET GE-SX)
- SK-9843 V2.0 Gigabit Ethernet 1000Base-SX Adapter
- SK-9844 Gigabit Ethernet Server Adapter (SK-NET GE-SX dual link)
- SK-9851 V2.0 Gigabit Ethernet 1000Base-SX Adapter
- SK-9861 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition)
- SK-9861 V2.0 Gigabit Ethernet 1000Base-SX Adapter
- SK-9862 Gigabit Ethernet Server Adapter (SK-NET GE-SX Volition dual link)
- SK-9871 Gigabit Ethernet Server Adapter (SK-NET GE-ZX)
- SK-9871 V2.0 Gigabit Ethernet 1000Base-ZX Adapter
- SK-9872 Gigabit Ethernet Server Adapter (SK-NET GE-ZX dual link)
- SMC EZ Card 1000 (SMC9452TXV.2)
The adapters support Jumbo Frames.
The dual link adapters support link-failover and dual port features.
Both Marvell Yukon and SysKonnect SK-98xx/SK-95xx adapters support
the scatter-gather functionality with sendfile(). Please refer to
<file:Documentation/networking/sk98lin.txt> for more information about
optional driver parameters.
Questions concerning this driver may be addressed to:
<linux@syskonnect.de>
If you want to compile this driver as a module ( = code which can be
inserted in and removed from the running kernel whenever you want),
say M here and read <file:Documentation/kbuild/modules.txt>. The module will
be called sk98lin. This is recommended.
config VIA_VELOCITY
tristate "VIA Velocity support"
depends on PCI

View file

@ -15,7 +15,7 @@ obj-$(CONFIG_CHELSIO_T3) += cxgb3/
obj-$(CONFIG_EHEA) += ehea/
obj-$(CONFIG_CAN) += can/
obj-$(CONFIG_BONDING) += bonding/
obj-$(CONFIG_ATL1) += atl1/
obj-$(CONFIG_ATL1) += atlx/
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
obj-$(CONFIG_TEHUTI) += tehuti.o
@ -75,7 +75,6 @@ ps3_gelic-objs += ps3_gelic_net.o $(gelic_wireless-y)
obj-$(CONFIG_TC35815) += tc35815.o
obj-$(CONFIG_SKGE) += skge.o
obj-$(CONFIG_SKY2) += sky2.o
obj-$(CONFIG_SK98LIN) += sk98lin/
obj-$(CONFIG_SKFP) += skfp/
obj-$(CONFIG_VIA_RHINE) += via-rhine.o
obj-$(CONFIG_VIA_VELOCITY) += via-velocity.o
@ -191,6 +190,7 @@ obj-$(CONFIG_ZORRO8390) += zorro8390.o
obj-$(CONFIG_HPLANCE) += hplance.o 7990.o
obj-$(CONFIG_MVME147_NET) += mvme147.o 7990.o
obj-$(CONFIG_EQUALIZER) += eql.o
obj-$(CONFIG_KORINA) += korina.o
obj-$(CONFIG_MIPS_JAZZ_SONIC) += jazzsonic.o
obj-$(CONFIG_MIPS_AU1X00_ENET) += au1000_eth.o
obj-$(CONFIG_MIPS_SIM_NET) += mipsnet.o

View file

@ -1010,7 +1010,7 @@ module_param(io, int, 0);
module_param(irq, int, 0);
module_param(board_type, int, 0);
int __init init_module(void)
static int __init cops_module_init(void)
{
if (io == 0)
printk(KERN_WARNING "%s: You shouldn't autoprobe with insmod\n",
@ -1021,12 +1021,14 @@ int __init init_module(void)
return 0;
}
void __exit cleanup_module(void)
static void __exit cops_module_exit(void)
{
unregister_netdev(cops_dev);
cleanup_card(cops_dev);
free_netdev(cops_dev);
}
module_init(cops_module_init);
module_exit(cops_module_exit);
#endif /* MODULE */
/*

View file

@ -940,7 +940,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
/* is the RECON info empty or old? */
if (!lp->first_recon || !lp->last_recon ||
jiffies - lp->last_recon > HZ * 10) {
time_after(jiffies, lp->last_recon + HZ * 10)) {
if (lp->network_down)
BUGMSG(D_NORMAL, "reconfiguration detected: cabling restored?\n");
lp->first_recon = lp->last_recon = jiffies;
@ -974,7 +974,8 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id)
lp->num_recons = 1;
}
}
} else if (lp->network_down && jiffies - lp->last_recon > HZ * 10) {
} else if (lp->network_down &&
time_after(jiffies, lp->last_recon + HZ * 10)) {
if (lp->network_down)
BUGMSG(D_NORMAL, "cabling restored?\n");
lp->first_recon = lp->last_recon = 0;
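The two arcnet hunks above swap open-coded jiffies arithmetic for time_after(), the kernel's wrap-safe way of comparing time stamps. The userspace sketch below mirrors the signed-difference trick behind that macro (it is not the actual <linux/jiffies.h> definition) and shows the case a naive deadline comparison gets wrong once the counter wraps:

#include <stdio.h>
#include <limits.h>

/* true if a is after b, even if the counter wrapped in between */
#define time_after(a, b)  ((long)((b) - (a)) < 0)

int main(void)
{
	unsigned long last_recon = ULONG_MAX - 4;   /* stamped just before wrap */
	unsigned long deadline   = last_recon + 10; /* wraps around to 5 */
	unsigned long now        = last_recon + 2;  /* only 2 ticks have passed */

	/* naive comparison claims the deadline already expired (wrong) */
	printf("naive  now > deadline:     %d\n", now > deadline);
	/* the signed-difference form answers correctly: not expired yet */
	printf("time_after(now, deadline): %d\n", time_after(now, deadline));
	return 0;
}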

View file

@ -348,14 +348,15 @@ MODULE_LICENSE("GPL");
#ifdef MODULE
int init_module(void)
static int __init com20020_module_init(void)
{
BUGLVL(D_NORMAL) printk(VERSION);
return 0;
}
void cleanup_module(void)
static void __exit com20020_module_exit(void)
{
}
module_init(com20020_module_init);
module_exit(com20020_module_exit);
#endif /* MODULE */

View file

@ -881,7 +881,7 @@ MODULE_PARM_DESC(io, "AT1700/FMV18X I/O base address");
MODULE_PARM_DESC(irq, "AT1700/FMV18X IRQ number");
MODULE_PARM_DESC(net_debug, "AT1700/FMV18X debug level (0-6)");
int __init init_module(void)
static int __init at1700_module_init(void)
{
if (io == 0)
printk("at1700: You should not use auto-probing with insmod!\n");
@ -891,13 +891,14 @@ int __init init_module(void)
return 0;
}
void __exit
cleanup_module(void)
static void __exit at1700_module_exit(void)
{
unregister_netdev(dev_at1700);
cleanup_card(dev_at1700);
free_netdev(dev_at1700);
}
module_init(at1700_module_init);
module_exit(at1700_module_exit);
#endif /* MODULE */
MODULE_LICENSE("GPL");

View file

@ -1155,7 +1155,7 @@ static int lance_set_mac_address( struct net_device *dev, void *addr )
#ifdef MODULE
static struct net_device *atarilance_dev;
int __init init_module(void)
static int __init atarilance_module_init(void)
{
atarilance_dev = atarilance_probe(-1);
if (IS_ERR(atarilance_dev))
@ -1163,13 +1163,14 @@ int __init init_module(void)
return 0;
}
void __exit cleanup_module(void)
static void __exit atarilance_module_exit(void)
{
unregister_netdev(atarilance_dev);
free_irq(atarilance_dev->irq, atarilance_dev);
free_netdev(atarilance_dev);
}
module_init(atarilance_module_init);
module_exit(atarilance_module_exit);
#endif /* MODULE */
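The cops, com20020, at1700 and atarilance hunks above all apply the same conversion: the legacy implicit init_module()/cleanup_module() entry points become static __init/__exit functions registered through module_init()/module_exit(). A minimal sketch of the shape these drivers converge on; the example_* names are placeholders, not code from any of the patched files:

#include <linux/init.h>
#include <linux/module.h>

static int __init example_module_init(void)
{
	/* probe and register the device(s) here; return 0 on success */
	return 0;
}

static void __exit example_module_exit(void)
{
	/* undo whatever example_module_init() registered */
}

module_init(example_module_init);
module_exit(example_module_exit);
MODULE_LICENSE("GPL");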

View file

@ -1,2 +0,0 @@
obj-$(CONFIG_ATL1) += atl1.o
atl1-y += atl1_main.o atl1_hw.o atl1_ethtool.o atl1_param.o

View file

@ -1,286 +0,0 @@
/*
* Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
* Copyright(c) 2006 Chris Snook <csnook@redhat.com>
* Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef _ATL1_H_
#define _ATL1_H_
#include <linux/types.h>
#include <linux/if_vlan.h>
#include "atl1_hw.h"
/* function prototypes needed by multiple files */
s32 atl1_up(struct atl1_adapter *adapter);
void atl1_down(struct atl1_adapter *adapter);
int atl1_reset(struct atl1_adapter *adapter);
s32 atl1_setup_ring_resources(struct atl1_adapter *adapter);
void atl1_free_ring_resources(struct atl1_adapter *adapter);
extern char atl1_driver_name[];
extern char atl1_driver_version[];
extern const struct ethtool_ops atl1_ethtool_ops;
struct atl1_adapter;
#define ATL1_MAX_INTR 3
#define ATL1_MAX_TX_BUF_LEN 0x3000 /* 12288 bytes */
#define ATL1_DEFAULT_TPD 256
#define ATL1_MAX_TPD 1024
#define ATL1_MIN_TPD 64
#define ATL1_DEFAULT_RFD 512
#define ATL1_MIN_RFD 128
#define ATL1_MAX_RFD 2048
#define ATL1_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
#define ATL1_RFD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_free_desc)
#define ATL1_TPD_DESC(R, i) ATL1_GET_DESC(R, i, struct tx_packet_desc)
#define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc)
/*
* This detached comment is preserved for documentation purposes only.
* It was originally attached to some code that got deleted, but seems
* important enough to keep around...
*
* <begin detached comment>
* Some workarounds require millisecond delays and are run during interrupt
* context. Most notably, when establishing link, the phy may need tweaking
* but cannot process phy register reads/writes faster than millisecond
* intervals...and we establish link due to a "link status change" interrupt.
* <end detached comment>
*/
/*
* atl1_ring_header represents a single, contiguous block of DMA space
* mapped for the three descriptor rings (tpd, rfd, rrd) and the two
* message blocks (cmb, smb) described below
*/
struct atl1_ring_header {
void *desc; /* virtual address */
dma_addr_t dma; /* physical address*/
unsigned int size; /* length in bytes */
};
/*
* atl1_buffer is wrapper around a pointer to a socket buffer
* so a DMA handle can be stored along with the skb
*/
struct atl1_buffer {
struct sk_buff *skb; /* socket buffer */
u16 length; /* rx buffer length */
u16 alloced; /* 1 if skb allocated */
dma_addr_t dma;
};
/* transmit packet descriptor (tpd) ring */
struct atl1_tpd_ring {
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
u16 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
u16 hw_idx; /* hardware index */
atomic_t next_to_clean;
atomic_t next_to_use;
struct atl1_buffer *buffer_info;
};
/* receive free descriptor (rfd) ring */
struct atl1_rfd_ring {
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
u16 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
atomic_t next_to_use;
u16 next_to_clean;
struct atl1_buffer *buffer_info;
};
/* receive return descriptor (rrd) ring */
struct atl1_rrd_ring {
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
unsigned int size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
u16 next_to_use;
atomic_t next_to_clean;
};
/* coalescing message block (cmb) */
struct atl1_cmb {
struct coals_msg_block *cmb;
dma_addr_t dma;
};
/* statistics message block (smb) */
struct atl1_smb {
struct stats_msg_block *smb;
dma_addr_t dma;
};
/* Statistics counters */
struct atl1_sft_stats {
u64 rx_packets;
u64 tx_packets;
u64 rx_bytes;
u64 tx_bytes;
u64 multicast;
u64 collisions;
u64 rx_errors;
u64 rx_length_errors;
u64 rx_crc_errors;
u64 rx_frame_errors;
u64 rx_fifo_errors;
u64 rx_missed_errors;
u64 tx_errors;
u64 tx_fifo_errors;
u64 tx_aborted_errors;
u64 tx_window_errors;
u64 tx_carrier_errors;
u64 tx_pause; /* num pause packets transmitted. */
u64 excecol; /* num tx packets w/ excessive collisions. */
u64 deffer; /* num tx packets deferred */
u64 scc; /* num packets subsequently transmitted
* successfully w/ single prior collision. */
u64 mcc; /* num packets subsequently transmitted
* successfully w/ multiple prior collisions. */
u64 latecol; /* num tx packets w/ late collisions. */
u64 tx_underun; /* num tx packets aborted due to transmit
* FIFO underrun, or TRD FIFO underrun */
u64 tx_trunc; /* num tx packets truncated due to size
* exceeding MTU, regardless whether truncated
* by the chip or not. (The name doesn't really
* reflect the meaning in this case.) */
u64 rx_pause; /* num Pause packets received. */
u64 rx_rrd_ov;
u64 rx_trunc;
};
/* hardware structure */
struct atl1_hw {
u8 __iomem *hw_addr;
struct atl1_adapter *back;
enum atl1_dma_order dma_ord;
enum atl1_dma_rcb rcb_value;
enum atl1_dma_req_block dmar_block;
enum atl1_dma_req_block dmaw_block;
u8 preamble_len;
u8 max_retry; /* Retransmission maximum, after which the
* packet will be discarded */
u8 jam_ipg; /* IPG to start JAM for collision based flow
* control in half-duplex mode. In units of
* 8-bit time */
u8 ipgt; /* Desired back to back inter-packet gap.
* The default is 96-bit time */
u8 min_ifg; /* Minimum number of IFG to enforce in between
* receive frames. Frame gap below such IFP
* is dropped */
u8 ipgr1; /* 64bit Carrier-Sense window */
u8 ipgr2; /* 96-bit IPG window */
u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned
* burst. Each TPD is 16 bytes long */
u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned
* burst. Each RFD is 12 bytes long */
u8 rfd_fetch_gap;
u8 rrd_burst; /* Threshold number of RRDs that can be retired
* in a burst. Each RRD is 16 bytes long */
u8 tpd_fetch_th;
u8 tpd_fetch_gap;
u16 tx_jumbo_task_th;
u16 txf_burst; /* Number of data bytes to read in a cache-
* aligned burst. Each SRAM entry is 8 bytes */
u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN
* packets should add 4 bytes */
u16 rx_jumbo_lkah;
u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after
* every 512ns passes. */
u16 lcol; /* Collision Window */
u16 cmb_tpd;
u16 cmb_rrd;
u16 cmb_rx_timer;
u16 cmb_tx_timer;
u32 smb_timer;
u16 media_type;
u16 autoneg_advertised;
u16 mii_autoneg_adv_reg;
u16 mii_1000t_ctrl_reg;
u32 max_frame_size;
u32 min_frame_size;
u16 dev_rev;
/* spi flash */
u8 flash_vendor;
u8 mac_addr[ETH_ALEN];
u8 perm_mac_addr[ETH_ALEN];
bool phy_configured;
};
struct atl1_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct net_device_stats net_stats;
struct atl1_sft_stats soft_stats;
struct vlan_group *vlgrp;
u32 rx_buffer_len;
u32 wol;
u16 link_speed;
u16 link_duplex;
spinlock_t lock;
struct work_struct tx_timeout_task;
struct work_struct link_chg_task;
struct work_struct pcie_dma_to_rst_task;
struct timer_list watchdog_timer;
struct timer_list phy_config_timer;
bool phy_timer_pending;
/* all descriptor rings' memory */
struct atl1_ring_header ring_header;
/* TX */
struct atl1_tpd_ring tpd_ring;
spinlock_t mb_lock;
/* RX */
struct atl1_rfd_ring rfd_ring;
struct atl1_rrd_ring rrd_ring;
u64 hw_csum_err;
u64 hw_csum_good;
u16 imt; /* interrupt moderator timer (2us resolution */
u16 ict; /* interrupt clear timer (2us resolution */
struct mii_if_info mii; /* MII interface info */
/* structs defined in atl1_hw.h */
u32 bd_number; /* board number */
bool pci_using_64;
struct atl1_hw hw;
struct atl1_smb smb;
struct atl1_cmb cmb;
};
#endif /* _ATL1_H_ */
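The detached comment near the top of this header notes that some PHY workarounds need millisecond delays yet are triggered from the link-change interrupt. The usual answer, and the reason struct atl1_adapter carries work_structs such as link_chg_task, is to defer the slow PHY work to process context, where msleep() is legal. A rough sketch of that deferral pattern, under the assumption that the link-change interrupt schedules link_chg_task; the handler body and example_ name are illustrative, not the driver's actual atl1_main.c code:

#include <linux/delay.h>	/* msleep() */
#include <linux/workqueue.h>
#include "atl1.h"		/* struct atl1_adapter, link_chg_task */

static void example_link_chg_task(struct work_struct *work)
{
	struct atl1_adapter *adapter =
		container_of(work, struct atl1_adapter, link_chg_task);

	/* millisecond-scale PHY pokes are fine here, unlike in the ISR */
	msleep(1);
	atl1_phy_setup_autoneg_adv(&adapter->hw);
}

/* from the "link status change" interrupt handler: */
/* schedule_work(&adapter->link_chg_task); */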

View file

@ -1,505 +0,0 @@
/*
* Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
* Copyright(c) 2006 Chris Snook <csnook@redhat.com>
* Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/mii.h>
#include <asm/uaccess.h>
#include "atl1.h"
struct atl1_stats {
char stat_string[ETH_GSTRING_LEN];
int sizeof_stat;
int stat_offset;
};
#define ATL1_STAT(m) sizeof(((struct atl1_adapter *)0)->m), \
offsetof(struct atl1_adapter, m)
static struct atl1_stats atl1_gstrings_stats[] = {
{"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
{"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
{"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
{"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
{"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
{"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
{"rx_dropped", ATL1_STAT(net_stats.rx_dropped)},
{"tx_dropped", ATL1_STAT(net_stats.tx_dropped)},
{"multicast", ATL1_STAT(soft_stats.multicast)},
{"collisions", ATL1_STAT(soft_stats.collisions)},
{"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
{"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
{"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
{"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
{"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
{"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
{"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
{"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
{"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
{"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
{"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
{"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
{"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
{"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
{"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
{"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
{"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
{"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
{"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
{"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
{"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
};
static void atl1_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
int i;
char *p;
for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
}
static int atl1_get_sset_count(struct net_device *netdev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
return ARRAY_SIZE(atl1_gstrings_stats);
default:
return -EOPNOTSUPP;
}
}
static int atl1_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_100baseT_Half |
SUPPORTED_100baseT_Full |
SUPPORTED_1000baseT_Full |
SUPPORTED_Autoneg | SUPPORTED_TP);
ecmd->advertising = ADVERTISED_TP;
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
hw->media_type == MEDIA_TYPE_1000M_FULL) {
ecmd->advertising |= ADVERTISED_Autoneg;
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
ecmd->advertising |= ADVERTISED_Autoneg;
ecmd->advertising |=
(ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
ADVERTISED_1000baseT_Full);
}
else
ecmd->advertising |= (ADVERTISED_1000baseT_Full);
}
ecmd->port = PORT_TP;
ecmd->phy_address = 0;
ecmd->transceiver = XCVR_INTERNAL;
if (netif_carrier_ok(adapter->netdev)) {
u16 link_speed, link_duplex;
atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
ecmd->speed = link_speed;
if (link_duplex == FULL_DUPLEX)
ecmd->duplex = DUPLEX_FULL;
else
ecmd->duplex = DUPLEX_HALF;
} else {
ecmd->speed = -1;
ecmd->duplex = -1;
}
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
hw->media_type == MEDIA_TYPE_1000M_FULL)
ecmd->autoneg = AUTONEG_ENABLE;
else
ecmd->autoneg = AUTONEG_DISABLE;
return 0;
}
static int atl1_set_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
u16 phy_data;
int ret_val = 0;
u16 old_media_type = hw->media_type;
if (netif_running(adapter->netdev)) {
dev_dbg(&adapter->pdev->dev, "ethtool shutting down adapter\n");
atl1_down(adapter);
}
if (ecmd->autoneg == AUTONEG_ENABLE)
hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
else {
if (ecmd->speed == SPEED_1000) {
if (ecmd->duplex != DUPLEX_FULL) {
dev_warn(&adapter->pdev->dev,
"can't force to 1000M half duplex\n");
ret_val = -EINVAL;
goto exit_sset;
}
hw->media_type = MEDIA_TYPE_1000M_FULL;
} else if (ecmd->speed == SPEED_100) {
if (ecmd->duplex == DUPLEX_FULL) {
hw->media_type = MEDIA_TYPE_100M_FULL;
} else
hw->media_type = MEDIA_TYPE_100M_HALF;
} else {
if (ecmd->duplex == DUPLEX_FULL)
hw->media_type = MEDIA_TYPE_10M_FULL;
else
hw->media_type = MEDIA_TYPE_10M_HALF;
}
}
switch (hw->media_type) {
case MEDIA_TYPE_AUTO_SENSOR:
ecmd->advertising =
ADVERTISED_10baseT_Half |
ADVERTISED_10baseT_Full |
ADVERTISED_100baseT_Half |
ADVERTISED_100baseT_Full |
ADVERTISED_1000baseT_Full |
ADVERTISED_Autoneg | ADVERTISED_TP;
break;
case MEDIA_TYPE_1000M_FULL:
ecmd->advertising =
ADVERTISED_1000baseT_Full |
ADVERTISED_Autoneg | ADVERTISED_TP;
break;
default:
ecmd->advertising = 0;
break;
}
if (atl1_phy_setup_autoneg_adv(hw)) {
ret_val = -EINVAL;
dev_warn(&adapter->pdev->dev,
"invalid ethtool speed/duplex setting\n");
goto exit_sset;
}
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
hw->media_type == MEDIA_TYPE_1000M_FULL)
phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
else {
switch (hw->media_type) {
case MEDIA_TYPE_100M_FULL:
phy_data =
MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
MII_CR_RESET;
break;
case MEDIA_TYPE_100M_HALF:
phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
break;
case MEDIA_TYPE_10M_FULL:
phy_data =
MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
break;
default: /* MEDIA_TYPE_10M_HALF: */
phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
break;
}
}
atl1_write_phy_reg(hw, MII_BMCR, phy_data);
exit_sset:
if (ret_val)
hw->media_type = old_media_type;
if (netif_running(adapter->netdev)) {
dev_dbg(&adapter->pdev->dev, "ethtool starting adapter\n");
atl1_up(adapter);
} else if (!ret_val) {
dev_dbg(&adapter->pdev->dev, "ethtool resetting adapter\n");
atl1_reset(adapter);
}
return ret_val;
}
static void atl1_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *drvinfo)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
strncpy(drvinfo->driver, atl1_driver_name, sizeof(drvinfo->driver));
strncpy(drvinfo->version, atl1_driver_version,
sizeof(drvinfo->version));
strncpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
sizeof(drvinfo->bus_info));
drvinfo->eedump_len = ATL1_EEDUMP_LEN;
}
static void atl1_get_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST | WAKE_BCAST | WAKE_MAGIC;
wol->wolopts = 0;
if (adapter->wol & ATL1_WUFC_EX)
wol->wolopts |= WAKE_UCAST;
if (adapter->wol & ATL1_WUFC_MC)
wol->wolopts |= WAKE_MCAST;
if (adapter->wol & ATL1_WUFC_BC)
wol->wolopts |= WAKE_BCAST;
if (adapter->wol & ATL1_WUFC_MAG)
wol->wolopts |= WAKE_MAGIC;
return;
}
static int atl1_set_wol(struct net_device *netdev,
struct ethtool_wolinfo *wol)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
return -EOPNOTSUPP;
adapter->wol = 0;
if (wol->wolopts & WAKE_UCAST)
adapter->wol |= ATL1_WUFC_EX;
if (wol->wolopts & WAKE_MCAST)
adapter->wol |= ATL1_WUFC_MC;
if (wol->wolopts & WAKE_BCAST)
adapter->wol |= ATL1_WUFC_BC;
if (wol->wolopts & WAKE_MAGIC)
adapter->wol |= ATL1_WUFC_MAG;
return 0;
}
static void atl1_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *txdr = &adapter->tpd_ring;
struct atl1_rfd_ring *rxdr = &adapter->rfd_ring;
ring->rx_max_pending = ATL1_MAX_RFD;
ring->tx_max_pending = ATL1_MAX_TPD;
ring->rx_mini_max_pending = 0;
ring->rx_jumbo_max_pending = 0;
ring->rx_pending = rxdr->count;
ring->tx_pending = txdr->count;
ring->rx_mini_pending = 0;
ring->rx_jumbo_pending = 0;
}
static int atl1_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_tpd_ring *tpdr = &adapter->tpd_ring;
struct atl1_rrd_ring *rrdr = &adapter->rrd_ring;
struct atl1_rfd_ring *rfdr = &adapter->rfd_ring;
struct atl1_tpd_ring tpd_old, tpd_new;
struct atl1_rfd_ring rfd_old, rfd_new;
struct atl1_rrd_ring rrd_old, rrd_new;
struct atl1_ring_header rhdr_old, rhdr_new;
int err;
tpd_old = adapter->tpd_ring;
rfd_old = adapter->rfd_ring;
rrd_old = adapter->rrd_ring;
rhdr_old = adapter->ring_header;
if (netif_running(adapter->netdev))
atl1_down(adapter);
rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD);
rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD :
rfdr->count;
rfdr->count = (rfdr->count + 3) & ~3;
rrdr->count = rfdr->count;
tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD);
tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD :
tpdr->count;
tpdr->count = (tpdr->count + 3) & ~3;
if (netif_running(adapter->netdev)) {
/* try to get new resources before deleting old */
err = atl1_setup_ring_resources(adapter);
if (err)
goto err_setup_ring;
/*
* save the new, restore the old in order to free it,
* then restore the new back again
*/
rfd_new = adapter->rfd_ring;
rrd_new = adapter->rrd_ring;
tpd_new = adapter->tpd_ring;
rhdr_new = adapter->ring_header;
adapter->rfd_ring = rfd_old;
adapter->rrd_ring = rrd_old;
adapter->tpd_ring = tpd_old;
adapter->ring_header = rhdr_old;
atl1_free_ring_resources(adapter);
adapter->rfd_ring = rfd_new;
adapter->rrd_ring = rrd_new;
adapter->tpd_ring = tpd_new;
adapter->ring_header = rhdr_new;
err = atl1_up(adapter);
if (err)
return err;
}
return 0;
err_setup_ring:
adapter->rfd_ring = rfd_old;
adapter->rrd_ring = rrd_old;
adapter->tpd_ring = tpd_old;
adapter->ring_header = rhdr_old;
atl1_up(adapter);
return err;
}
static void atl1_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *epause)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
hw->media_type == MEDIA_TYPE_1000M_FULL) {
epause->autoneg = AUTONEG_ENABLE;
} else {
epause->autoneg = AUTONEG_DISABLE;
}
epause->rx_pause = 1;
epause->tx_pause = 1;
}
static int atl1_set_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *epause)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
hw->media_type == MEDIA_TYPE_1000M_FULL) {
epause->autoneg = AUTONEG_ENABLE;
} else {
epause->autoneg = AUTONEG_DISABLE;
}
epause->rx_pause = 1;
epause->tx_pause = 1;
return 0;
}
static u32 atl1_get_rx_csum(struct net_device *netdev)
{
return 1;
}
static void atl1_get_strings(struct net_device *netdev, u32 stringset,
u8 *data)
{
u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
memcpy(p, atl1_gstrings_stats[i].stat_string,
ETH_GSTRING_LEN);
p += ETH_GSTRING_LEN;
}
break;
}
}
static int atl1_nway_reset(struct net_device *netdev)
{
struct atl1_adapter *adapter = netdev_priv(netdev);
struct atl1_hw *hw = &adapter->hw;
if (netif_running(netdev)) {
u16 phy_data;
atl1_down(adapter);
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
hw->media_type == MEDIA_TYPE_1000M_FULL) {
phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
} else {
switch (hw->media_type) {
case MEDIA_TYPE_100M_FULL:
phy_data = MII_CR_FULL_DUPLEX |
MII_CR_SPEED_100 | MII_CR_RESET;
break;
case MEDIA_TYPE_100M_HALF:
phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
break;
case MEDIA_TYPE_10M_FULL:
phy_data = MII_CR_FULL_DUPLEX |
MII_CR_SPEED_10 | MII_CR_RESET;
break;
default: /* MEDIA_TYPE_10M_HALF */
phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
}
}
atl1_write_phy_reg(hw, MII_BMCR, phy_data);
atl1_up(adapter);
}
return 0;
}
const struct ethtool_ops atl1_ethtool_ops = {
.get_settings = atl1_get_settings,
.set_settings = atl1_set_settings,
.get_drvinfo = atl1_get_drvinfo,
.get_wol = atl1_get_wol,
.set_wol = atl1_set_wol,
.get_ringparam = atl1_get_ringparam,
.set_ringparam = atl1_set_ringparam,
.get_pauseparam = atl1_get_pauseparam,
.set_pauseparam = atl1_set_pauseparam,
.get_rx_csum = atl1_get_rx_csum,
.set_tx_csum = ethtool_op_set_tx_hw_csum,
.get_link = ethtool_op_get_link,
.set_sg = ethtool_op_set_sg,
.get_strings = atl1_get_strings,
.nway_reset = atl1_nway_reset,
.get_ethtool_stats = atl1_get_ethtool_stats,
.get_sset_count = atl1_get_sset_count,
.set_tso = ethtool_op_set_tso,
};

View file

@ -1,720 +0,0 @@
/*
* Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
* Copyright(c) 2006 Chris Snook <csnook@redhat.com>
* Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <asm/byteorder.h>
#include "atl1.h"
/*
* Reset the transmit and receive units; mask and clear all interrupts.
* hw - Struct containing variables accessed by shared code
* return : ATL1_SUCCESS or idle status (if error)
*/
s32 atl1_reset_hw(struct atl1_hw *hw)
{
struct pci_dev *pdev = hw->back->pdev;
u32 icr;
int i;
/*
* Clear Interrupt mask to stop board from generating
* interrupts & Clear any pending interrupt events
*/
/*
* iowrite32(0, hw->hw_addr + REG_IMR);
* iowrite32(0xffffffff, hw->hw_addr + REG_ISR);
*/
/*
* Issue Soft Reset to the MAC. This will reset the chip's
* transmit, receive, DMA. It will not affect
* the current PCI configuration. The global reset bit is self-
* clearing, and should clear within a microsecond.
*/
iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL);
ioread32(hw->hw_addr + REG_MASTER_CTRL);
iowrite16(1, hw->hw_addr + REG_GPHY_ENABLE);
ioread16(hw->hw_addr + REG_GPHY_ENABLE);
msleep(1); /* delay about 1ms */
/* Wait at least 10ms for All module to be Idle */
for (i = 0; i < 10; i++) {
icr = ioread32(hw->hw_addr + REG_IDLE_STATUS);
if (!icr)
break;
msleep(1); /* delay 1 ms */
cpu_relax(); /* FIXME: is this still the right way to do this? */
}
if (icr) {
dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr);
return icr;
}
return ATL1_SUCCESS;
}
/* function about EEPROM
*
* check_eeprom_exist
* return 0 if eeprom exist
*/
static int atl1_check_eeprom_exist(struct atl1_hw *hw)
{
u32 value;
value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
if (value & SPI_FLASH_CTRL_EN_VPD) {
value &= ~SPI_FLASH_CTRL_EN_VPD;
iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
}
value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST);
return ((value & 0xFF00) == 0x6C00) ? 0 : 1;
}
static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value)
{
int i;
u32 control;
if (offset & 3)
return false; /* address not aligned */
iowrite32(0, hw->hw_addr + REG_VPD_DATA);
control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT;
iowrite32(control, hw->hw_addr + REG_VPD_CAP);
ioread32(hw->hw_addr + REG_VPD_CAP);
for (i = 0; i < 10; i++) {
msleep(2);
control = ioread32(hw->hw_addr + REG_VPD_CAP);
if (control & VPD_CAP_VPD_FLAG)
break;
}
if (control & VPD_CAP_VPD_FLAG) {
*p_value = ioread32(hw->hw_addr + REG_VPD_DATA);
return true;
}
return false; /* timeout */
}
/*
* Reads the value from a PHY register
* hw - Struct containing variables accessed by shared code
* reg_addr - address of the PHY register to read
*/
s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data)
{
u32 val;
int i;
val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT |
MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 <<
MDIO_CLK_SEL_SHIFT;
iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
ioread32(hw->hw_addr + REG_MDIO_CTRL);
for (i = 0; i < MDIO_WAIT_TIMES; i++) {
udelay(2);
val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
if (!(val & (MDIO_START | MDIO_BUSY)))
break;
}
if (!(val & (MDIO_START | MDIO_BUSY))) {
*phy_data = (u16) val;
return ATL1_SUCCESS;
}
return ATL1_ERR_PHY;
}
#define CUSTOM_SPI_CS_SETUP 2
#define CUSTOM_SPI_CLK_HI 2
#define CUSTOM_SPI_CLK_LO 2
#define CUSTOM_SPI_CS_HOLD 2
#define CUSTOM_SPI_CS_HI 3
static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf)
{
int i;
u32 value;
iowrite32(0, hw->hw_addr + REG_SPI_DATA);
iowrite32(addr, hw->hw_addr + REG_SPI_ADDR);
value = SPI_FLASH_CTRL_WAIT_READY |
(CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) <<
SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI &
SPI_FLASH_CTRL_CLK_HI_MASK) <<
SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO &
SPI_FLASH_CTRL_CLK_LO_MASK) <<
SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD &
SPI_FLASH_CTRL_CS_HOLD_MASK) <<
SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI &
SPI_FLASH_CTRL_CS_HI_MASK) <<
SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) <<
SPI_FLASH_CTRL_INS_SHIFT;
iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
value |= SPI_FLASH_CTRL_START;
iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL);
ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
for (i = 0; i < 10; i++) {
msleep(1); /* 1ms */
value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL);
if (!(value & SPI_FLASH_CTRL_START))
break;
}
if (value & SPI_FLASH_CTRL_START)
return false;
*buf = ioread32(hw->hw_addr + REG_SPI_DATA);
return true;
}
/*
* get_permanent_address
* return 0 if get valid mac address,
*/
static int atl1_get_permanent_address(struct atl1_hw *hw)
{
u32 addr[2];
u32 i, control;
u16 reg;
u8 eth_addr[ETH_ALEN];
bool key_valid;
if (is_valid_ether_addr(hw->perm_mac_addr))
return 0;
/* init */
addr[0] = addr[1] = 0;
if (!atl1_check_eeprom_exist(hw)) { /* eeprom exist */
reg = 0;
key_valid = false;
/* Read out all EEPROM content */
i = 0;
while (1) {
if (atl1_read_eeprom(hw, i + 0x100, &control)) {
if (key_valid) {
if (reg == REG_MAC_STA_ADDR)
addr[0] = control;
else if (reg == (REG_MAC_STA_ADDR + 4))
addr[1] = control;
key_valid = false;
} else if ((control & 0xff) == 0x5A) {
key_valid = true;
reg = (u16) (control >> 16);
} else
break; /* assume end of data when encountering an invalid KEYWORD */
} else
break; /* read error */
i += 4;
}
*(u32 *) &eth_addr[2] = swab32(addr[0]);
*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
if (is_valid_ether_addr(eth_addr)) {
memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
return 0;
}
return 1;
}
/* see if SPI FLAGS exist ? */
addr[0] = addr[1] = 0;
reg = 0;
key_valid = false;
i = 0;
while (1) {
if (atl1_spi_read(hw, i + 0x1f000, &control)) {
if (key_valid) {
if (reg == REG_MAC_STA_ADDR)
addr[0] = control;
else if (reg == (REG_MAC_STA_ADDR + 4))
addr[1] = control;
key_valid = false;
} else if ((control & 0xff) == 0x5A) {
key_valid = true;
reg = (u16) (control >> 16);
} else
break; /* data end */
} else
break; /* read error */
i += 4;
}
*(u32 *) &eth_addr[2] = swab32(addr[0]);
*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
if (is_valid_ether_addr(eth_addr)) {
memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
return 0;
}
/*
* On some motherboards, the MAC address is written by the
* BIOS directly to the MAC register during POST, and is
* not stored in eeprom. If all else thus far has failed
* to fetch the permanent MAC address, try reading it directly.
*/
addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR);
addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4));
*(u32 *) &eth_addr[2] = swab32(addr[0]);
*(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]);
if (is_valid_ether_addr(eth_addr)) {
memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN);
return 0;
}
return 1;
}
/*
* Reads the adapter's MAC address from the EEPROM
* hw - Struct containing variables accessed by shared code
*/
s32 atl1_read_mac_addr(struct atl1_hw *hw)
{
u16 i;
if (atl1_get_permanent_address(hw))
random_ether_addr(hw->perm_mac_addr);
for (i = 0; i < ETH_ALEN; i++)
hw->mac_addr[i] = hw->perm_mac_addr[i];
return ATL1_SUCCESS;
}
/*
* Hashes an address to determine its location in the multicast table
* hw - Struct containing variables accessed by shared code
* mc_addr - the multicast address to hash
*
* atl1_hash_mc_addr
* purpose
* set hash value for a multicast address
* hash calcu processing :
* 1. calcu 32bit CRC for multicast address
* 2. reverse crc with MSB to LSB
*/
u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr)
{
u32 crc32, value = 0;
int i;
crc32 = ether_crc_le(6, mc_addr);
for (i = 0; i < 32; i++)
value |= (((crc32 >> i) & 1) << (31 - i));
return value;
}
/*
* Sets the bit in the multicast table corresponding to the hash value.
* hw - Struct containing variables accessed by shared code
* hash_value - Multicast address hash value
*/
void atl1_hash_set(struct atl1_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg;
u32 mta;
/*
* The HASH Table is a register array of 2 32-bit registers.
* It is treated like an array of 64 bits. We want to set
* bit BitArray[hash_value]. So we figure out what register
* the bit is in, read it, OR in the new bit, then write
* back the new value. The register is determined by the
* upper 7 bits of the hash value and the bit within that
* register are determined by the lower 5 bits of the value.
*/
hash_reg = (hash_value >> 31) & 0x1;
hash_bit = (hash_value >> 26) & 0x1F;
mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
mta |= (1 << hash_bit);
iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2));
}
/*
* Writes a value to a PHY register
* hw - Struct containing variables accessed by shared code
* reg_addr - address of the PHY register to write
* data - data to write to the PHY
*/
s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data)
{
int i;
u32 val;
val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT |
(reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT |
MDIO_SUP_PREAMBLE |
MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT;
iowrite32(val, hw->hw_addr + REG_MDIO_CTRL);
ioread32(hw->hw_addr + REG_MDIO_CTRL);
for (i = 0; i < MDIO_WAIT_TIMES; i++) {
udelay(2);
val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
if (!(val & (MDIO_START | MDIO_BUSY)))
break;
}
if (!(val & (MDIO_START | MDIO_BUSY)))
return ATL1_SUCCESS;
return ATL1_ERR_PHY;
}
/*
* Bring L001's PHY out of power-saving state (hardware bug workaround)
* hw - Struct containing variables accessed by shared code
* at power-on, L001's PHY is always in power-saving state
* (Gigabit Link forbidden)
*/
static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw)
{
s32 ret;
ret = atl1_write_phy_reg(hw, 29, 0x0029);
if (ret)
return ret;
return atl1_write_phy_reg(hw, 30, 0);
}
/*
*TODO: do something or get rid of this
*/
s32 atl1_phy_enter_power_saving(struct atl1_hw *hw)
{
/* s32 ret_val;
* u16 phy_data;
*/
/*
ret_val = atl1_write_phy_reg(hw, ...);
ret_val = atl1_write_phy_reg(hw, ...);
....
*/
return ATL1_SUCCESS;
}
/*
* Resets the PHY and makes all configuration valid
* hw - Struct containing variables accessed by shared code
*
* Sets bits 15 and 12 of the MII Control register (for F001 bug)
*/
static s32 atl1_phy_reset(struct atl1_hw *hw)
{
struct pci_dev *pdev = hw->back->pdev;
s32 ret_val;
u16 phy_data;
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
hw->media_type == MEDIA_TYPE_1000M_FULL)
phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
else {
switch (hw->media_type) {
case MEDIA_TYPE_100M_FULL:
phy_data =
MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
MII_CR_RESET;
break;
case MEDIA_TYPE_100M_HALF:
phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
break;
case MEDIA_TYPE_10M_FULL:
phy_data =
MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET;
break;
default: /* MEDIA_TYPE_10M_HALF: */
phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
break;
}
}
ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data);
if (ret_val) {
u32 val;
int i;
/* pcie serdes link may be down! */
dev_dbg(&pdev->dev, "pcie phy link down\n");
for (i = 0; i < 25; i++) {
msleep(1);
val = ioread32(hw->hw_addr + REG_MDIO_CTRL);
if (!(val & (MDIO_START | MDIO_BUSY)))
break;
}
if ((val & (MDIO_START | MDIO_BUSY)) != 0) {
dev_warn(&pdev->dev, "pcie link down at least 25ms\n");
return ret_val;
}
}
return ATL1_SUCCESS;
}
/*
* Configures PHY autoneg and flow control advertisement settings
* hw - Struct containing variables accessed by shared code
*/
s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw)
{
s32 ret_val;
s16 mii_autoneg_adv_reg;
s16 mii_1000t_ctrl_reg;
/* Read the MII Auto-Neg Advertisement Register (Address 4). */
mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK;
/* Read the MII 1000Base-T Control Register (Address 9). */
mii_1000t_ctrl_reg = MII_AT001_CR_1000T_DEFAULT_CAP_MASK;
/*
* First we clear all the 10/100 mb speed bits in the Auto-Neg
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T Control Register (Address 9).
*/
mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK;
mii_1000t_ctrl_reg &= ~MII_AT001_CR_1000T_SPEED_MASK;
/*
* Need to parse media_type and set up
* the appropriate PHY registers.
*/
switch (hw->media_type) {
case MEDIA_TYPE_AUTO_SENSOR:
mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS |
MII_AR_10T_FD_CAPS |
MII_AR_100TX_HD_CAPS |
MII_AR_100TX_FD_CAPS);
mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS;
break;
case MEDIA_TYPE_1000M_FULL:
mii_1000t_ctrl_reg |= MII_AT001_CR_1000T_FD_CAPS;
break;
case MEDIA_TYPE_100M_FULL:
mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS;
break;
case MEDIA_TYPE_100M_HALF:
mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS;
break;
case MEDIA_TYPE_10M_FULL:
mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS;
break;
default:
mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS;
break;
}
/* flow control fixed to enable all */
mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE);
hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg;
hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg;
ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg);
if (ret_val)
return ret_val;
ret_val = atl1_write_phy_reg(hw, MII_AT001_CR, mii_1000t_ctrl_reg);
if (ret_val)
return ret_val;
return ATL1_SUCCESS;
}
/*
* Configures link settings.
* hw - Struct containing variables accessed by shared code
* Assumes the hardware has previously been reset and the
* transmitter and receiver are not enabled.
*/
static s32 atl1_setup_link(struct atl1_hw *hw)
{
struct pci_dev *pdev = hw->back->pdev;
s32 ret_val;
/*
* Options:
* PHY will advertise value(s) parsed from
* autoneg_advertised and fc
* no matter what autoneg is, we will not wait for the link result.
*/
ret_val = atl1_phy_setup_autoneg_adv(hw);
if (ret_val) {
dev_dbg(&pdev->dev, "error setting up autonegotiation\n");
return ret_val;
}
/* SW.Reset , En-Auto-Neg if needed */
ret_val = atl1_phy_reset(hw);
if (ret_val) {
dev_dbg(&pdev->dev, "error resetting phy\n");
return ret_val;
}
hw->phy_configured = true;
return ret_val;
}
static struct atl1_spi_flash_dev flash_table[] = {
/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SECTOR_ERASE CHIP_ERASE */
{"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62},
{"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60},
{"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7},
};
static void atl1_init_flash_opcode(struct atl1_hw *hw)
{
if (hw->flash_vendor >= ARRAY_SIZE(flash_table))
hw->flash_vendor = 0; /* ATMEL */
/* Init OP table */
iowrite8(flash_table[hw->flash_vendor].cmd_program,
hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM);
iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase,
hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE);
iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase,
hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE);
iowrite8(flash_table[hw->flash_vendor].cmd_rdid,
hw->hw_addr + REG_SPI_FLASH_OP_RDID);
iowrite8(flash_table[hw->flash_vendor].cmd_wren,
hw->hw_addr + REG_SPI_FLASH_OP_WREN);
iowrite8(flash_table[hw->flash_vendor].cmd_rdsr,
hw->hw_addr + REG_SPI_FLASH_OP_RDSR);
iowrite8(flash_table[hw->flash_vendor].cmd_wrsr,
hw->hw_addr + REG_SPI_FLASH_OP_WRSR);
iowrite8(flash_table[hw->flash_vendor].cmd_read,
hw->hw_addr + REG_SPI_FLASH_OP_READ);
}
/*
* Performs basic configuration of the adapter.
* hw - Struct containing variables accessed by shared code
* Assumes that the controller has previously been reset and is in a
* post-reset uninitialized state. Initializes multicast table,
* and Calls routines to setup link
* Leaves the transmit and receive units disabled and uninitialized.
*/
s32 atl1_init_hw(struct atl1_hw *hw)
{
u32 ret_val = 0;
/* Zero out the Multicast HASH table */
iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
/* clear the old settings from the multicast hash table */
iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
atl1_init_flash_opcode(hw);
if (!hw->phy_configured) {
/* enable GPHY LinkChange Interrupt */
ret_val = atl1_write_phy_reg(hw, 18, 0xC00);
if (ret_val)
return ret_val;
/* make PHY out of power-saving state */
ret_val = atl1_phy_leave_power_saving(hw);
if (ret_val)
return ret_val;
/* Call a subroutine to configure the link */
ret_val = atl1_setup_link(hw);
}
return ret_val;
}
/*
* Detects the current speed and duplex settings of the hardware.
* hw - Struct containing variables accessed by shared code
* speed - Speed of the connection
* duplex - Duplex setting of the connection
*/
s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex)
{
struct pci_dev *pdev = hw->back->pdev;
s32 ret_val;
u16 phy_data;
/* ; --- Read PHY Specific Status Register (17) */
ret_val = atl1_read_phy_reg(hw, MII_AT001_PSSR, &phy_data);
if (ret_val)
return ret_val;
if (!(phy_data & MII_AT001_PSSR_SPD_DPLX_RESOLVED))
return ATL1_ERR_PHY_RES;
switch (phy_data & MII_AT001_PSSR_SPEED) {
case MII_AT001_PSSR_1000MBS:
*speed = SPEED_1000;
break;
case MII_AT001_PSSR_100MBS:
*speed = SPEED_100;
break;
case MII_AT001_PSSR_10MBS:
*speed = SPEED_10;
break;
default:
dev_dbg(&pdev->dev, "error getting speed\n");
return ATL1_ERR_PHY_SPEED;
break;
}
if (phy_data & MII_AT001_PSSR_DPLX)
*duplex = FULL_DUPLEX;
else
*duplex = HALF_DUPLEX;
return ATL1_SUCCESS;
}
void atl1_set_mac_addr(struct atl1_hw *hw)
{
u32 value;
/*
* 00-0B-6A-F6-00-DC
* 0: 6AF600DC 1: 000B
* low dword
*/
value = (((u32) hw->mac_addr[2]) << 24) |
(((u32) hw->mac_addr[3]) << 16) |
(((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5]));
iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR);
/* high dword */
value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1]));
iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2));
}

View file

@ -1,946 +0,0 @@
/*
* Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
* Copyright(c) 2006 Chris Snook <csnook@redhat.com>
* Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* There are a lot of defines in here that are unused and/or have cryptic
* names. Please leave them alone, as they're the closest thing we have
* to a spec from Attansic at present. *ahem* -- CHS
*/
#ifndef _ATL1_HW_H_
#define _ATL1_HW_H_
#include <linux/types.h>
#include <linux/mii.h>
struct atl1_adapter;
struct atl1_hw;
/* function prototypes needed by multiple files */
s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw);
s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data);
s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex);
s32 atl1_read_mac_addr(struct atl1_hw *hw);
s32 atl1_init_hw(struct atl1_hw *hw);
s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex);
s32 atl1_set_speed_and_duplex(struct atl1_hw *hw, u16 speed, u16 duplex);
u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
void atl1_set_mac_addr(struct atl1_hw *hw);
s32 atl1_phy_enter_power_saving(struct atl1_hw *hw);
s32 atl1_reset_hw(struct atl1_hw *hw);
void atl1_check_options(struct atl1_adapter *adapter);
/* register definitions */
#define REG_PCIE_CAP_LIST 0x58
#define REG_VPD_CAP 0x6C
#define VPD_CAP_ID_MASK 0xff
#define VPD_CAP_ID_SHIFT 0
#define VPD_CAP_NEXT_PTR_MASK 0xFF
#define VPD_CAP_NEXT_PTR_SHIFT 8
#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
#define VPD_CAP_VPD_ADDR_SHIFT 16
#define VPD_CAP_VPD_FLAG 0x80000000
#define REG_VPD_DATA 0x70
#define REG_SPI_FLASH_CTRL 0x200
#define SPI_FLASH_CTRL_STS_NON_RDY 0x1
#define SPI_FLASH_CTRL_STS_WEN 0x2
#define SPI_FLASH_CTRL_STS_WPEN 0x80
#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF
#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0
#define SPI_FLASH_CTRL_INS_MASK 0x7
#define SPI_FLASH_CTRL_INS_SHIFT 8
#define SPI_FLASH_CTRL_START 0x800
#define SPI_FLASH_CTRL_EN_VPD 0x2000
#define SPI_FLASH_CTRL_LDSTART 0x8000
#define SPI_FLASH_CTRL_CS_HI_MASK 0x3
#define SPI_FLASH_CTRL_CS_HI_SHIFT 16
#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3
#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18
#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3
#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20
#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3
#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22
#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3
#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24
#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3
#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26
#define SPI_FLASH_CTRL_WAIT_READY 0x10000000
#define REG_SPI_ADDR 0x204
#define REG_SPI_DATA 0x208
#define REG_SPI_FLASH_CONFIG 0x20C
#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF
#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0
#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3
#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24
#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000
#define REG_SPI_FLASH_OP_PROGRAM 0x210
#define REG_SPI_FLASH_OP_SC_ERASE 0x211
#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212
#define REG_SPI_FLASH_OP_RDID 0x213
#define REG_SPI_FLASH_OP_WREN 0x214
#define REG_SPI_FLASH_OP_RDSR 0x215
#define REG_SPI_FLASH_OP_WRSR 0x216
#define REG_SPI_FLASH_OP_READ 0x217
#define REG_TWSI_CTRL 0x218
#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
#define TWSI_CTRL_LD_OFFSET_SHIFT 0
#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
#define TWSI_CTRL_SW_LDSTART 0x800
#define TWSI_CTRL_HW_LDSTART 0x1000
#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
#define TWSI_CTRL_LD_EXIST 0x400000
#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
#define TWSI_CTRL_FREQ_SEL_100K 0
#define TWSI_CTRL_FREQ_SEL_200K 1
#define TWSI_CTRL_FREQ_SEL_300K 2
#define TWSI_CTRL_FREQ_SEL_400K 3
#define TWSI_CTRL_SMB_SLV_ADDR
#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
#define REG_PCIE_DEV_MISC_CTRL 0x21C
#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2
#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1
#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4
#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8
#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10
/* Selene Master Control Register */
#define REG_MASTER_CTRL 0x1400
#define MASTER_CTRL_SOFT_RST 0x1
#define MASTER_CTRL_MTIMER_EN 0x2
#define MASTER_CTRL_ITIMER_EN 0x4
#define MASTER_CTRL_MANUAL_INT 0x8
#define MASTER_CTRL_REV_NUM_SHIFT 16
#define MASTER_CTRL_REV_NUM_MASK 0xff
#define MASTER_CTRL_DEV_ID_SHIFT 24
#define MASTER_CTRL_DEV_ID_MASK 0xff
/* Timer Initial Value Register */
#define REG_MANUAL_TIMER_INIT 0x1404
/* IRQ ModeratorTimer Initial Value Register */
#define REG_IRQ_MODU_TIMER_INIT 0x1408
#define REG_GPHY_ENABLE 0x140C
/* IRQ Anti-Lost Timer Initial Value Register */
#define REG_CMBDISDMA_TIMER 0x140E
/* Block IDLE Status Register */
#define REG_IDLE_STATUS 0x1410
#define IDLE_STATUS_RXMAC 1
#define IDLE_STATUS_TXMAC 2
#define IDLE_STATUS_RXQ 4
#define IDLE_STATUS_TXQ 8
#define IDLE_STATUS_DMAR 0x10
#define IDLE_STATUS_DMAW 0x20
#define IDLE_STATUS_SMB 0x40
#define IDLE_STATUS_CMB 0x80
/* MDIO Control Register */
#define REG_MDIO_CTRL 0x1414
#define MDIO_DATA_MASK 0xffff
#define MDIO_DATA_SHIFT 0
#define MDIO_REG_ADDR_MASK 0x1f
#define MDIO_REG_ADDR_SHIFT 16
#define MDIO_RW 0x200000
#define MDIO_SUP_PREAMBLE 0x400000
#define MDIO_START 0x800000
#define MDIO_CLK_SEL_SHIFT 24
#define MDIO_CLK_25_4 0
#define MDIO_CLK_25_6 2
#define MDIO_CLK_25_8 3
#define MDIO_CLK_25_10 4
#define MDIO_CLK_25_14 5
#define MDIO_CLK_25_20 6
#define MDIO_CLK_25_28 7
#define MDIO_BUSY 0x8000000
#define MDIO_WAIT_TIMES 30
/* MII PHY Status Register */
#define REG_PHY_STATUS 0x1418
/* BIST Control and Status Register0 (for the Packet Memory) */
#define REG_BIST0_CTRL 0x141c
#define BIST0_NOW 0x1
#define BIST0_SRAM_FAIL 0x2
#define BIST0_FUSE_FLAG 0x4
#define REG_BIST1_CTRL 0x1420
#define BIST1_NOW 0x1
#define BIST1_SRAM_FAIL 0x2
#define BIST1_FUSE_FLAG 0x4
/* MAC Control Register */
#define REG_MAC_CTRL 0x1480
#define MAC_CTRL_TX_EN 1
#define MAC_CTRL_RX_EN 2
#define MAC_CTRL_TX_FLOW 4
#define MAC_CTRL_RX_FLOW 8
#define MAC_CTRL_LOOPBACK 0x10
#define MAC_CTRL_DUPLX 0x20
#define MAC_CTRL_ADD_CRC 0x40
#define MAC_CTRL_PAD 0x80
#define MAC_CTRL_LENCHK 0x100
#define MAC_CTRL_HUGE_EN 0x200
#define MAC_CTRL_PRMLEN_SHIFT 10
#define MAC_CTRL_PRMLEN_MASK 0xf
#define MAC_CTRL_RMV_VLAN 0x4000
#define MAC_CTRL_PROMIS_EN 0x8000
#define MAC_CTRL_TX_PAUSE 0x10000
#define MAC_CTRL_SCNT 0x20000
#define MAC_CTRL_SRST_TX 0x40000
#define MAC_CTRL_TX_SIMURST 0x80000
#define MAC_CTRL_SPEED_SHIFT 20
#define MAC_CTRL_SPEED_MASK 0x300000
#define MAC_CTRL_SPEED_1000 2
#define MAC_CTRL_SPEED_10_100 1
#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000
#define MAC_CTRL_TX_HUGE 0x800000
#define MAC_CTRL_RX_CHKSUM_EN 0x1000000
#define MAC_CTRL_MC_ALL_EN 0x2000000
#define MAC_CTRL_BC_EN 0x4000000
#define MAC_CTRL_DBG 0x8000000
/* MAC IPG/IFG Control Register */
#define REG_MAC_IPG_IFG 0x1484
#define MAC_IPG_IFG_IPGT_SHIFT 0
#define MAC_IPG_IFG_IPGT_MASK 0x7f
#define MAC_IPG_IFG_MIFG_SHIFT 8
#define MAC_IPG_IFG_MIFG_MASK 0xff
#define MAC_IPG_IFG_IPGR1_SHIFT 16
#define MAC_IPG_IFG_IPGR1_MASK 0x7f
#define MAC_IPG_IFG_IPGR2_SHIFT 24
#define MAC_IPG_IFG_IPGR2_MASK 0x7f
/* MAC STATION ADDRESS */
#define REG_MAC_STA_ADDR 0x1488
/* Hash table for multicast address */
#define REG_RX_HASH_TABLE 0x1490
/* MAC Half-Duplex Control Register */
#define REG_MAC_HALF_DUPLX_CTRL 0x1498
#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0
#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3ff
#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12
#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xf
#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000
#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000
#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000
#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000
#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20
#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xf
#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24
#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xf
/* Maximum Frame Length Control Register */
#define REG_MTU 0x149c
/* Wake-On-Lan control register */
#define REG_WOL_CTRL 0x14a0
#define WOL_PATTERN_EN 0x00000001
#define WOL_PATTERN_PME_EN 0x00000002
#define WOL_MAGIC_EN 0x00000004
#define WOL_MAGIC_PME_EN 0x00000008
#define WOL_LINK_CHG_EN 0x00000010
#define WOL_LINK_CHG_PME_EN 0x00000020
#define WOL_PATTERN_ST 0x00000100
#define WOL_MAGIC_ST 0x00000200
#define WOL_LINKCHG_ST 0x00000400
#define WOL_CLK_SWITCH_EN 0x00008000
#define WOL_PT0_EN 0x00010000
#define WOL_PT1_EN 0x00020000
#define WOL_PT2_EN 0x00040000
#define WOL_PT3_EN 0x00080000
#define WOL_PT4_EN 0x00100000
#define WOL_PT5_EN 0x00200000
#define WOL_PT6_EN 0x00400000
/* WOL Length ( 2 DWORD ) */
#define REG_WOL_PATTERN_LEN 0x14a4
#define WOL_PT_LEN_MASK 0x7f
#define WOL_PT0_LEN_SHIFT 0
#define WOL_PT1_LEN_SHIFT 8
#define WOL_PT2_LEN_SHIFT 16
#define WOL_PT3_LEN_SHIFT 24
#define WOL_PT4_LEN_SHIFT 0
#define WOL_PT5_LEN_SHIFT 8
#define WOL_PT6_LEN_SHIFT 16
/* Internal SRAM Partition Register */
#define REG_SRAM_RFD_ADDR 0x1500
#define REG_SRAM_RFD_LEN (REG_SRAM_RFD_ADDR+ 4)
#define REG_SRAM_RRD_ADDR (REG_SRAM_RFD_ADDR+ 8)
#define REG_SRAM_RRD_LEN (REG_SRAM_RFD_ADDR+12)
#define REG_SRAM_TPD_ADDR (REG_SRAM_RFD_ADDR+16)
#define REG_SRAM_TPD_LEN (REG_SRAM_RFD_ADDR+20)
#define REG_SRAM_TRD_ADDR (REG_SRAM_RFD_ADDR+24)
#define REG_SRAM_TRD_LEN (REG_SRAM_RFD_ADDR+28)
#define REG_SRAM_RXF_ADDR (REG_SRAM_RFD_ADDR+32)
#define REG_SRAM_RXF_LEN (REG_SRAM_RFD_ADDR+36)
#define REG_SRAM_TXF_ADDR (REG_SRAM_RFD_ADDR+40)
#define REG_SRAM_TXF_LEN (REG_SRAM_RFD_ADDR+44)
#define REG_SRAM_TCPH_PATH_ADDR (REG_SRAM_RFD_ADDR+48)
#define SRAM_TCPH_ADDR_MASK 0x0fff
#define SRAM_TCPH_ADDR_SHIFT 0
#define SRAM_PATH_ADDR_MASK 0x0fff
#define SRAM_PATH_ADDR_SHIFT 16
/* Load Ptr Register */
#define REG_LOAD_PTR (REG_SRAM_RFD_ADDR+52)
/* Descriptor Control register */
#define REG_DESC_BASE_ADDR_HI 0x1540
#define REG_DESC_RFD_ADDR_LO (REG_DESC_BASE_ADDR_HI+4)
#define REG_DESC_RRD_ADDR_LO (REG_DESC_BASE_ADDR_HI+8)
#define REG_DESC_TPD_ADDR_LO (REG_DESC_BASE_ADDR_HI+12)
#define REG_DESC_CMB_ADDR_LO (REG_DESC_BASE_ADDR_HI+16)
#define REG_DESC_SMB_ADDR_LO (REG_DESC_BASE_ADDR_HI+20)
#define REG_DESC_RFD_RRD_RING_SIZE (REG_DESC_BASE_ADDR_HI+24)
#define DESC_RFD_RING_SIZE_MASK 0x7ff
#define DESC_RFD_RING_SIZE_SHIFT 0
#define DESC_RRD_RING_SIZE_MASK 0x7ff
#define DESC_RRD_RING_SIZE_SHIFT 16
#define REG_DESC_TPD_RING_SIZE (REG_DESC_BASE_ADDR_HI+28)
#define DESC_TPD_RING_SIZE_MASK 0x3ff
#define DESC_TPD_RING_SIZE_SHIFT 0
/* TXQ Control Register */
#define REG_TXQ_CTRL 0x1580
#define TXQ_CTRL_TPD_BURST_NUM_SHIFT 0
#define TXQ_CTRL_TPD_BURST_NUM_MASK 0x1f
#define TXQ_CTRL_EN 0x20
#define TXQ_CTRL_ENH_MODE 0x40
#define TXQ_CTRL_TPD_FETCH_TH_SHIFT 8
#define TXQ_CTRL_TPD_FETCH_TH_MASK 0x3f
#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16
#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xffff
/* Jumbo packet Threshold for task offload */
#define REG_TX_JUMBO_TASK_TH_TPD_IPG 0x1584
#define TX_JUMBO_TASK_TH_MASK 0x7ff
#define TX_JUMBO_TASK_TH_SHIFT 0
#define TX_TPD_MIN_IPG_MASK 0x1f
#define TX_TPD_MIN_IPG_SHIFT 16
/* RXQ Control Register */
#define REG_RXQ_CTRL 0x15a0
#define RXQ_CTRL_RFD_BURST_NUM_SHIFT 0
#define RXQ_CTRL_RFD_BURST_NUM_MASK 0xff
#define RXQ_CTRL_RRD_BURST_THRESH_SHIFT 8
#define RXQ_CTRL_RRD_BURST_THRESH_MASK 0xff
#define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT 16
#define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK 0x1f
#define RXQ_CTRL_CUT_THRU_EN 0x40000000
#define RXQ_CTRL_EN 0x80000000
/* Rx jumbo packet threshold and rrd retirement timer */
#define REG_RXQ_JMBOSZ_RRDTIM (REG_RXQ_CTRL+ 4)
#define RXQ_JMBOSZ_TH_MASK 0x7ff
#define RXQ_JMBOSZ_TH_SHIFT 0
#define RXQ_JMBO_LKAH_MASK 0xf
#define RXQ_JMBO_LKAH_SHIFT 11
#define RXQ_RRD_TIMER_MASK 0xffff
#define RXQ_RRD_TIMER_SHIFT 16
/* RFD flow control register */
#define REG_RXQ_RXF_PAUSE_THRESH (REG_RXQ_CTRL+ 8)
#define RXQ_RXF_PAUSE_TH_HI_SHIFT 16
#define RXQ_RXF_PAUSE_TH_HI_MASK 0xfff
#define RXQ_RXF_PAUSE_TH_LO_SHIFT 0
#define RXQ_RXF_PAUSE_TH_LO_MASK 0xfff
/* RRD flow control register */
#define REG_RXQ_RRD_PAUSE_THRESH (REG_RXQ_CTRL+12)
#define RXQ_RRD_PAUSE_TH_HI_SHIFT 0
#define RXQ_RRD_PAUSE_TH_HI_MASK 0xfff
#define RXQ_RRD_PAUSE_TH_LO_SHIFT 16
#define RXQ_RRD_PAUSE_TH_LO_MASK 0xfff
/* DMA Engine Control Register */
#define REG_DMA_CTRL 0x15c0
#define DMA_CTRL_DMAR_IN_ORDER 0x1
#define DMA_CTRL_DMAR_ENH_ORDER 0x2
#define DMA_CTRL_DMAR_OUT_ORDER 0x4
#define DMA_CTRL_RCB_VALUE 0x8
#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4
#define DMA_CTRL_DMAR_BURST_LEN_MASK 7
#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7
#define DMA_CTRL_DMAW_BURST_LEN_MASK 7
#define DMA_CTRL_DMAR_EN 0x400
#define DMA_CTRL_DMAW_EN 0x800
/* CMB/SMB Control Register */
#define REG_CSMB_CTRL 0x15d0
#define CSMB_CTRL_CMB_NOW 1
#define CSMB_CTRL_SMB_NOW 2
#define CSMB_CTRL_CMB_EN 4
#define CSMB_CTRL_SMB_EN 8
/* CMB DMA Write Threshold Register */
#define REG_CMB_WRITE_TH (REG_CSMB_CTRL+ 4)
#define CMB_RRD_TH_SHIFT 0
#define CMB_RRD_TH_MASK 0x7ff
#define CMB_TPD_TH_SHIFT 16
#define CMB_TPD_TH_MASK 0x7ff
/* RX/TX count-down timer to trigger CMB-write. 2us resolution. */
#define REG_CMB_WRITE_TIMER (REG_CSMB_CTRL+ 8)
#define CMB_RX_TM_SHIFT 0
#define CMB_RX_TM_MASK 0xffff
#define CMB_TX_TM_SHIFT 16
#define CMB_TX_TM_MASK 0xffff
/* Number of packet received since last CMB write */
#define REG_CMB_RX_PKT_CNT (REG_CSMB_CTRL+12)
/* Number of packet transmitted since last CMB write */
#define REG_CMB_TX_PKT_CNT (REG_CSMB_CTRL+16)
/* SMB auto DMA timer register */
#define REG_SMB_TIMER (REG_CSMB_CTRL+20)
/* Mailbox Register */
#define REG_MAILBOX 0x15f0
#define MB_RFD_PROD_INDX_SHIFT 0
#define MB_RFD_PROD_INDX_MASK 0x7ff
#define MB_RRD_CONS_INDX_SHIFT 11
#define MB_RRD_CONS_INDX_MASK 0x7ff
#define MB_TPD_PROD_INDX_SHIFT 22
#define MB_TPD_PROD_INDX_MASK 0x3ff
/* Interrupt Status Register */
#define REG_ISR 0x1600
#define ISR_SMB 1
#define ISR_TIMER 2
#define ISR_MANUAL 4
#define ISR_RXF_OV 8
#define ISR_RFD_UNRUN 0x10
#define ISR_RRD_OV 0x20
#define ISR_TXF_UNRUN 0x40
#define ISR_LINK 0x80
#define ISR_HOST_RFD_UNRUN 0x100
#define ISR_HOST_RRD_OV 0x200
#define ISR_DMAR_TO_RST 0x400
#define ISR_DMAW_TO_RST 0x800
#define ISR_GPHY 0x1000
#define ISR_RX_PKT 0x10000
#define ISR_TX_PKT 0x20000
#define ISR_TX_DMA 0x40000
#define ISR_RX_DMA 0x80000
#define ISR_CMB_RX 0x100000
#define ISR_CMB_TX 0x200000
#define ISR_MAC_RX 0x400000
#define ISR_MAC_TX 0x800000
#define ISR_UR_DETECTED 0x1000000
#define ISR_FERR_DETECTED 0x2000000
#define ISR_NFERR_DETECTED 0x4000000
#define ISR_CERR_DETECTED 0x8000000
#define ISR_PHY_LINKDOWN 0x10000000
#define ISR_DIS_SMB 0x20000000
#define ISR_DIS_DMA 0x40000000
#define ISR_DIS_INT 0x80000000
/* Interrupt Mask Register */
#define REG_IMR 0x1604
/* Normal Interrupt mask */
#define IMR_NORMAL_MASK (\
ISR_SMB |\
ISR_GPHY |\
ISR_PHY_LINKDOWN|\
ISR_DMAR_TO_RST |\
ISR_DMAW_TO_RST |\
ISR_CMB_TX |\
ISR_CMB_RX )
/* Debug Interrupt Mask (enable all interrupt) */
#define IMR_DEBUG_MASK (\
ISR_SMB |\
ISR_TIMER |\
ISR_MANUAL |\
ISR_RXF_OV |\
ISR_RFD_UNRUN |\
ISR_RRD_OV |\
ISR_TXF_UNRUN |\
ISR_LINK |\
ISR_CMB_TX |\
ISR_CMB_RX |\
ISR_RX_PKT |\
ISR_TX_PKT |\
ISR_MAC_RX |\
ISR_MAC_TX )
/* Interrupt Status Register */
#define REG_RFD_RRD_IDX 0x1800
#define REG_TPD_IDX 0x1804
/* MII definition */
/* PHY Common Register */
#define MII_AT001_CR 0x09
#define MII_AT001_SR 0x0A
#define MII_AT001_ESR 0x0F
#define MII_AT001_PSCR 0x10
#define MII_AT001_PSSR 0x11
/* PHY Control Register */
#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100, 00=10 */
#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100, 00=10 */
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
#define MII_CR_SPEED_MASK 0x2040
#define MII_CR_SPEED_1000 0x0040
#define MII_CR_SPEED_100 0x2000
#define MII_CR_SPEED_10 0x0000
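/*
 * Illustrative sketch, not part of the original header: the forced-speed
 * encoding is split across BMCR bits 6 and 13, so forced-speed control
 * values would be built roughly as below ('bmcr' and 'bmcr_gig' are
 * hypothetical locals).
 */
#if 0
	u16 bmcr = MII_CR_SPEED_100 | MII_CR_FULL_DUPLEX;	/* 01 -> 100 Mb/s */
	u16 bmcr_gig = MII_CR_SPEED_1000 | MII_CR_FULL_DUPLEX;	/* 10 -> 1000 Mb/s */
#endif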
/* PHY Status Register */
#define MII_SR_EXTENDED_CAPS 0x0001 /* Extended register capabilities */
#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext. status info in Reg 0x0F */
#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
/* Link partner ability register. */
#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
#define MII_LPA_PAUSE 0x0400 /* PAUSE */
#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
#define MII_LPA_NPAGE 0x8000 /* Next page bit */
/* Autoneg Advertisement Register */
#define MII_AR_SELECTOR_FIELD 0x0001 /* indicates IEEE 802.3 CSMA/CD */
#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Direction bit */
#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability supported */
#define MII_AR_SPEED_MASK 0x01E0
#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
/* 1000BASE-T Control Register */
#define MII_AT001_CR_1000T_HD_CAPS 0x0100 /* Advertise 1000T HD capability */
#define MII_AT001_CR_1000T_FD_CAPS 0x0200 /* Advertise 1000T FD capability */
#define MII_AT001_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device port, 0=DTE device */
#define MII_AT001_CR_1000T_MS_VALUE 0x0800 /* 1=Configure PHY as Master, 0=Configure PHY as Slave */
#define MII_AT001_CR_1000T_MS_ENABLE 0x1000 /* 1=Master/Slave manual config value, 0=Automatic Master/Slave config */
#define MII_AT001_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
#define MII_AT001_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
#define MII_AT001_CR_1000T_TEST_MODE_2 0x4000 /* Master Transmit Jitter test */
#define MII_AT001_CR_1000T_TEST_MODE_3 0x6000 /* Slave Transmit Jitter test */
#define MII_AT001_CR_1000T_TEST_MODE_4 0x8000 /* Transmitter Distortion test */
#define MII_AT001_CR_1000T_SPEED_MASK 0x0300
#define MII_AT001_CR_1000T_DEFAULT_CAP_MASK 0x0300
/* 1000BASE-T Status Register */
#define MII_AT001_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
#define MII_AT001_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
#define MII_AT001_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
#define MII_AT001_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
#define MII_AT001_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master, 0=Slave */
#define MII_AT001_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config fault */
#define MII_AT001_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
#define MII_AT001_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
/* Extended Status Register */
#define MII_AT001_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
#define MII_AT001_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
#define MII_AT001_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
#define MII_AT001_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
/* AT001 PHY Specific Control Register */
#define MII_AT001_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Function disabled */
#define MII_AT001_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
#define MII_AT001_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
#define MII_AT001_PSCR_MAC_POWERDOWN 0x0008
#define MII_AT001_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low, 0=CLK125 toggling */
#define MII_AT001_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5, Manual MDI configuration */
#define MII_AT001_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
#define MII_AT001_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
#define MII_AT001_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled all speeds. */
#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE 0x0080 /* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T RX Threshold), 0=Normal 10BASE-T RX Threshold */
#define MII_AT001_PSCR_MII_5BIT_ENABLE 0x0100 /* 1=5-Bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
#define MII_AT001_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler disable */
#define MII_AT001_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
#define MII_AT001_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
#define MII_AT001_PSCR_POLARITY_REVERSAL_SHIFT 1
#define MII_AT001_PSCR_AUTO_X_MODE_SHIFT 5
#define MII_AT001_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
/* AT001 PHY Specific Status Register */
#define MII_AT001_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
#define MII_AT001_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
#define MII_AT001_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
#define MII_AT001_PSSR_10MBS 0x0000 /* 00=10Mbs */
#define MII_AT001_PSSR_100MBS 0x4000 /* 01=100Mbs */
#define MII_AT001_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
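/*
 * Illustrative sketch, not part of the original header: once the PHY has
 * resolved speed and duplex, both can be decoded from the PHY-specific
 * status word roughly as below ('pssr', 'speed' and 'duplex' are
 * hypothetical locals; the SPEED_* and *_DUPLEX values are defined
 * further down in this file).
 */
#if 0
	if (pssr & MII_AT001_PSSR_SPD_DPLX_RESOLVED) {
		switch (pssr & MII_AT001_PSSR_SPEED) {
		case MII_AT001_PSSR_1000MBS:
			speed = SPEED_1000;
			break;
		case MII_AT001_PSSR_100MBS:
			speed = SPEED_100;
			break;
		default:
			speed = SPEED_10;
			break;
		}
		duplex = (pssr & MII_AT001_PSSR_DPLX) ? FULL_DUPLEX : HALF_DUPLEX;
	}
#endif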
/* PCI Command Register Bit Definitions */
#define PCI_REG_COMMAND 0x04 /* PCI Command Register */
#define CMD_IO_SPACE 0x0001
#define CMD_MEMORY_SPACE 0x0002
#define CMD_BUS_MASTER 0x0004
/* Wake Up Filter Control */
#define ATL1_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
#define ATL1_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
#define ATL1_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define ATL1_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */
#define ATL1_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
/* Error Codes */
#define ATL1_SUCCESS 0
#define ATL1_ERR_EEPROM 1
#define ATL1_ERR_PHY 2
#define ATL1_ERR_CONFIG 3
#define ATL1_ERR_PARAM 4
#define ATL1_ERR_MAC_TYPE 5
#define ATL1_ERR_PHY_TYPE 6
#define ATL1_ERR_PHY_SPEED 7
#define ATL1_ERR_PHY_RES 8
#define SPEED_0 0xffff
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
#define MEDIA_TYPE_AUTO_SENSOR 0
#define MEDIA_TYPE_1000M_FULL 1
#define MEDIA_TYPE_100M_FULL 2
#define MEDIA_TYPE_100M_HALF 3
#define MEDIA_TYPE_10M_FULL 4
#define MEDIA_TYPE_10M_HALF 5
#define ADVERTISE_10_HALF 0x0001
#define ADVERTISE_10_FULL 0x0002
#define ADVERTISE_100_HALF 0x0004
#define ADVERTISE_100_FULL 0x0008
#define ADVERTISE_1000_HALF 0x0010
#define ADVERTISE_1000_FULL 0x0020
#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* Everything but 1000-Half */
#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */
#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */
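/* Worked check, not part of the original header: 0x002F above is simply the
 * OR of the ADVERTISE_* bits minus ADVERTISE_1000_HALF:
 * 0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0020 = 0x002F.
 */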
#define MAX_JUMBO_FRAME_SIZE 0x2800
#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
/* For checksumming , the sum of all words in the EEPROM should equal 0xBABA */
#define EEPROM_SUM 0xBABA
#define ATL1_EEDUMP_LEN 48
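/*
 * Illustrative sketch, not part of the original header: an EEPROM image is
 * treated as valid when its 16-bit words sum (mod 2^16) to EEPROM_SUM.
 * The accessor, word count and variables below are hypothetical names.
 */
#if 0
	u16 sum = 0, word;
	int i;

	for (i = 0; i < eeprom_word_count; i++) {
		atl1_read_eeprom_word(hw, i, &word);	/* hypothetical accessor */
		sum += word;
	}
	checksum_ok = (sum == EEPROM_SUM);
#endif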
/* Statistics counters collected by the MAC */
struct stats_msg_block {
/* rx */
u32 rx_ok; /* The number of good packet received. */
u32 rx_bcast; /* The number of good broadcast packet received. */
u32 rx_mcast; /* The number of good multicast packet received. */
u32 rx_pause; /* The number of Pause packet received. */
u32 rx_ctrl; /* The number of Control packet received other than Pause frame. */
u32 rx_fcs_err; /* The number of packets with bad FCS. */
u32 rx_len_err; /* The number of packets with mismatch of length field and actual size. */
u32 rx_byte_cnt; /* The number of bytes of good packet received. FCS is NOT included. */
u32 rx_runt; /* The number of packets received that are less than 64 byte long and with good FCS. */
u32 rx_frag; /* The number of packets received that are less than 64 byte long and with bad FCS. */
u32 rx_sz_64; /* The number of good and bad packets received that are 64 byte long. */
u32 rx_sz_65_127; /* The number of good and bad packets received that are between 65 and 127-byte long. */
u32 rx_sz_128_255; /* The number of good and bad packets received that are between 128 and 255-byte long. */
u32 rx_sz_256_511; /* The number of good and bad packets received that are between 256 and 511-byte long. */
u32 rx_sz_512_1023; /* The number of good and bad packets received that are between 512 and 1023-byte long. */
u32 rx_sz_1024_1518; /* The number of good and bad packets received that are between 1024 and 1518-byte long. */
u32 rx_sz_1519_max; /* The number of good and bad packets received that are between 1519-byte and MTU. */
u32 rx_sz_ov; /* The number of good and bad packets received that are more than MTU size, truncated by Selene. */
u32 rx_rxf_ov; /* The number of frame dropped due to occurrence of RX FIFO overflow. */
u32 rx_rrd_ov; /* The number of frame dropped due to occurrence of RRD overflow. */
u32 rx_align_err; /* Alignment Error */
u32 rx_bcast_byte_cnt; /* The byte count of broadcast packet received, excluding FCS. */
u32 rx_mcast_byte_cnt; /* The byte count of multicast packet received, excluding FCS. */
u32 rx_err_addr; /* The number of packets dropped due to address filtering. */
/* tx */
u32 tx_ok; /* The number of good packet transmitted. */
u32 tx_bcast; /* The number of good broadcast packet transmitted. */
u32 tx_mcast; /* The number of good multicast packet transmitted. */
u32 tx_pause; /* The number of Pause packet transmitted. */
u32 tx_exc_defer; /* The number of packets transmitted with excessive deferral. */
u32 tx_ctrl; /* The number of packets transmitted is a control frame, excluding Pause frame. */
u32 tx_defer; /* The number of packets transmitted that is deferred. */
u32 tx_byte_cnt; /* The number of bytes of data transmitted. FCS is NOT included. */
u32 tx_sz_64; /* The number of good and bad packets transmitted that are 64 byte long. */
u32 tx_sz_65_127; /* The number of good and bad packets transmitted that are between 65 and 127-byte long. */
u32 tx_sz_128_255; /* The number of good and bad packets transmitted that are between 128 and 255-byte long. */
u32 tx_sz_256_511; /* The number of good and bad packets transmitted that are between 256 and 511-byte long. */
u32 tx_sz_512_1023; /* The number of good and bad packets transmitted that are between 512 and 1023-byte long. */
u32 tx_sz_1024_1518; /* The number of good and bad packets transmitted that are between 1024 and 1518-byte long. */
u32 tx_sz_1519_max; /* The number of good and bad packets transmitted that are between 1519-byte and MTU. */
u32 tx_1_col; /* The number of packets subsequently transmitted successfully with a single prior collision. */
u32 tx_2_col; /* The number of packets subsequently transmitted successfully with multiple prior collisions. */
u32 tx_late_col; /* The number of packets transmitted with late collisions. */
u32 tx_abort_col; /* The number of transmit packets aborted due to excessive collisions. */
u32 tx_underrun; /* The number of transmit packets aborted due to transmit FIFO underrun, or TRD FIFO underrun */
u32 tx_rd_eop; /* The number of times that read beyond the EOP into the next frame area when TRD was not written timely */
u32 tx_len_err; /* The number of transmit packets with length field does NOT match the actual frame size. */
u32 tx_trunc; /* The number of transmit packets truncated due to size exceeding MTU. */
u32 tx_bcast_byte; /* The byte count of broadcast packet transmitted, excluding FCS. */
u32 tx_mcast_byte; /* The byte count of multicast packet transmitted, excluding FCS. */
u32 smb_updated; /* 1: SMB Updated. This is used by software as the indication that the statistics have been updated.
* Software should clear this bit after retrieving the statistics information. */
};
/* Coalescing Message Block */
struct coals_msg_block {
u32 int_stats; /* interrupt status */
u16 rrd_prod_idx; /* TRD Producer Index. */
u16 rfd_cons_idx; /* RFD Consumer Index. */
u16 update; /* Selene sets this bit every time it DMAs the CMB to host memory.
* Software is supposed to clear this bit when the CMB information has been processed. */
u16 tpd_cons_idx; /* TPD Consumer Index. */
};
/* RRD descriptor */
struct rx_return_desc {
u8 num_buf; /* Number of RFD buffers used by the received packet */
u8 resved;
u16 buf_indx; /* RFD Index of the first buffer */
union {
u32 valid;
struct {
u16 rx_chksum;
u16 pkt_size;
} xsum_sz;
} xsz;
u16 pkt_flg; /* Packet flags */
u16 err_flg; /* Error flags */
u16 resved2;
u16 vlan_tag; /* VLAN TAG */
};
#define PACKET_FLAG_ETH_TYPE 0x0080
#define PACKET_FLAG_VLAN_INS 0x0100
#define PACKET_FLAG_ERR 0x0200
#define PACKET_FLAG_IPV4 0x0400
#define PACKET_FLAG_UDP 0x0800
#define PACKET_FLAG_TCP 0x1000
#define PACKET_FLAG_BCAST 0x2000
#define PACKET_FLAG_MCAST 0x4000
#define PACKET_FLAG_PAUSE 0x8000
#define ERR_FLAG_CRC 0x0001
#define ERR_FLAG_CODE 0x0002
#define ERR_FLAG_DRIBBLE 0x0004
#define ERR_FLAG_RUNT 0x0008
#define ERR_FLAG_OV 0x0010
#define ERR_FLAG_TRUNC 0x0020
#define ERR_FLAG_IP_CHKSUM 0x0040
#define ERR_FLAG_L4_CHKSUM 0x0080
#define ERR_FLAG_LEN 0x0100
#define ERR_FLAG_DES_ADDR 0x0200
/* RFD descriptor */
struct rx_free_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
__le16 buf_len; /* Size of the receive buffer in host memory, in byte */
u16 coalese; /* Update consumer index to host after the reception of this frame */
/* __attribute__ ((packed)) is required */
} __attribute__ ((packed));
/* tsopu defines */
#define TSO_PARAM_BUFLEN_MASK 0x3FFF
#define TSO_PARAM_BUFLEN_SHIFT 0
#define TSO_PARAM_DMAINT_MASK 0x0001
#define TSO_PARAM_DMAINT_SHIFT 14
#define TSO_PARAM_PKTNT_MASK 0x0001
#define TSO_PARAM_PKTINT_SHIFT 15
#define TSO_PARAM_VLANTAG_MASK 0xFFFF
#define TSO_PARAM_VLAN_SHIFT 16
/* tsopl defines */
#define TSO_PARAM_EOP_MASK 0x0001
#define TSO_PARAM_EOP_SHIFT 0
#define TSO_PARAM_COALESCE_MASK 0x0001
#define TSO_PARAM_COALESCE_SHIFT 1
#define TSO_PARAM_INSVLAG_MASK 0x0001
#define TSO_PARAM_INSVLAG_SHIFT 2
#define TSO_PARAM_CUSTOMCKSUM_MASK 0x0001
#define TSO_PARAM_CUSTOMCKSUM_SHIFT 3
#define TSO_PARAM_SEGMENT_MASK 0x0001
#define TSO_PARAM_SEGMENT_SHIFT 4
#define TSO_PARAM_IPCKSUM_MASK 0x0001
#define TSO_PARAM_IPCKSUM_SHIFT 5
#define TSO_PARAM_TCPCKSUM_MASK 0x0001
#define TSO_PARAM_TCPCKSUM_SHIFT 6
#define TSO_PARAM_UDPCKSUM_MASK 0x0001
#define TSO_PARAM_UDPCKSUM_SHIFT 7
#define TSO_PARAM_VLANTAGGED_MASK 0x0001
#define TSO_PARAM_VLANTAGGED_SHIFT 8
#define TSO_PARAM_ETHTYPE_MASK 0x0001
#define TSO_PARAM_ETHTYPE_SHIFT 9
#define TSO_PARAM_IPHL_MASK 0x000F
#define TSO_PARAM_IPHL_SHIFT 10
#define TSO_PARAM_TCPHDRLEN_MASK 0x000F
#define TSO_PARAM_TCPHDRLEN_SHIFT 14
#define TSO_PARAM_HDRFLAG_MASK 0x0001
#define TSO_PARAM_HDRFLAG_SHIFT 18
#define TSO_PARAM_MSS_MASK 0x1FFF
#define TSO_PARAM_MSS_SHIFT 19
/* csumpu defines */
#define CSUM_PARAM_BUFLEN_MASK 0x3FFF
#define CSUM_PARAM_BUFLEN_SHIFT 0
#define CSUM_PARAM_DMAINT_MASK 0x0001
#define CSUM_PARAM_DMAINT_SHIFT 14
#define CSUM_PARAM_PKTINT_MASK 0x0001
#define CSUM_PARAM_PKTINT_SHIFT 15
#define CSUM_PARAM_VALANTAG_MASK 0xFFFF
#define CSUM_PARAM_VALAN_SHIFT 16
/* csumpl defines*/
#define CSUM_PARAM_EOP_MASK 0x0001
#define CSUM_PARAM_EOP_SHIFT 0
#define CSUM_PARAM_COALESCE_MASK 0x0001
#define CSUM_PARAM_COALESCE_SHIFT 1
#define CSUM_PARAM_INSVLAG_MASK 0x0001
#define CSUM_PARAM_INSVLAG_SHIFT 2
#define CSUM_PARAM_CUSTOMCKSUM_MASK 0x0001
#define CSUM_PARAM_CUSTOMCKSUM_SHIFT 3
#define CSUM_PARAM_SEGMENT_MASK 0x0001
#define CSUM_PARAM_SEGMENT_SHIFT 4
#define CSUM_PARAM_IPCKSUM_MASK 0x0001
#define CSUM_PARAM_IPCKSUM_SHIFT 5
#define CSUM_PARAM_TCPCKSUM_MASK 0x0001
#define CSUM_PARAM_TCPCKSUM_SHIFT 6
#define CSUM_PARAM_UDPCKSUM_MASK 0x0001
#define CSUM_PARAM_UDPCKSUM_SHIFT 7
#define CSUM_PARAM_VLANTAGGED_MASK 0x0001
#define CSUM_PARAM_VLANTAGGED_SHIFT 8
#define CSUM_PARAM_ETHTYPE_MASK 0x0001
#define CSUM_PARAM_ETHTYPE_SHIFT 9
#define CSUM_PARAM_IPHL_MASK 0x000F
#define CSUM_PARAM_IPHL_SHIFT 10
#define CSUM_PARAM_PLOADOFFSET_MASK 0x00FF
#define CSUM_PARAM_PLOADOFFSET_SHIFT 16
#define CSUM_PARAM_XSUMOFFSET_MASK 0x00FF
#define CSUM_PARAM_XSUMOFFSET_SHIFT 24
/* TPD descriptor */
struct tso_param {
/* The order of these declarations is important -- don't change it */
u32 tsopu; /* tso_param upper word */
u32 tsopl; /* tso_param lower word */
};
struct csum_param {
/* The order of these declarations is important -- don't change it */
u32 csumpu; /* csum_param upper word */
u32 csumpl; /* csum_param lower word */
};
union tpd_descr {
u64 data;
struct csum_param csum;
struct tso_param tso;
};
struct tx_packet_desc {
__le64 buffer_addr;
union tpd_descr desc;
};
/* DMA Order Settings */
enum atl1_dma_order {
atl1_dma_ord_in = 1,
atl1_dma_ord_enh = 2,
atl1_dma_ord_out = 4
};
enum atl1_dma_rcb {
atl1_rcb_64 = 0,
atl1_rcb_128 = 1
};
enum atl1_dma_req_block {
atl1_dma_req_128 = 0,
atl1_dma_req_256 = 1,
atl1_dma_req_512 = 2,
atl1_dma_req_1024 = 3,
atl1_dma_req_2048 = 4,
atl1_dma_req_4096 = 5
};
struct atl1_spi_flash_dev {
const char *manu_name; /* manufacturer id */
/* op-code */
u8 cmd_wrsr;
u8 cmd_read;
u8 cmd_program;
u8 cmd_wren;
u8 cmd_wrdi;
u8 cmd_rdsr;
u8 cmd_rdid;
u8 cmd_sector_erase;
u8 cmd_chip_erase;
};
#endif /* _ATL1_HW_H_ */

@ -1,203 +0,0 @@
/*
* Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
* Copyright(c) 2006 Chris Snook <csnook@redhat.com>
* Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/types.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include "atl1.h"
/*
* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
#define ATL1_MAX_NIC 4
#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
/*
* Interrupt Moderate Timer in units of 2 us
*
* Valid Range: 10-65535
*
* Default Value: 100 (200us)
*/
static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
static int num_int_mod_timer = 0;
module_param_array_named(int_mod_timer, int_mod_timer, int, &num_int_mod_timer, 0);
MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
/*
* flash_vendor
*
* Valid Range: 0-2
*
* 0 - Atmel
* 1 - SST
* 2 - ST
*
* Default Value: 0
*/
static int __devinitdata flash_vendor[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
static int num_flash_vendor = 0;
module_param_array_named(flash_vendor, flash_vendor, int, &num_flash_vendor, 0);
MODULE_PARM_DESC(flash_vendor, "SPI flash vendor");
#define DEFAULT_INT_MOD_CNT 100 /* 200us */
#define MAX_INT_MOD_CNT 65000
#define MIN_INT_MOD_CNT 50
#define FLASH_VENDOR_DEFAULT 0
#define FLASH_VENDOR_MIN 0
#define FLASH_VENDOR_MAX 2
struct atl1_option {
enum { enable_option, range_option, list_option } type;
char *name;
char *err;
int def;
union {
struct { /* range_option info */
int min;
int max;
} r;
struct { /* list_option info */
int nr;
struct atl1_opt_list {
int i;
char *str;
} *p;
} l;
} arg;
};
static int __devinit atl1_validate_option(int *value, struct atl1_option *opt, struct pci_dev *pdev)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
switch (opt->type) {
case enable_option:
switch (*value) {
case OPTION_ENABLED:
dev_info(&pdev->dev, "%s enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
dev_info(&pdev->dev, "%s disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
dev_info(&pdev->dev, "%s set to %i\n", opt->name,
*value);
return 0;
}
break;
case list_option:{
int i;
struct atl1_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
dev_info(&pdev->dev, "%s\n",
ent->str);
return 0;
}
}
}
break;
default:
break;
}
dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
/*
* atl1_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
* This routine checks all command line parameters for valid user
* input. If an invalid value is given, or if no user specified
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
*/
void __devinit atl1_check_options(struct atl1_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
int bd = adapter->bd_number;
if (bd >= ATL1_MAX_NIC) {
dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
dev_notice(&pdev->dev, "using defaults for all values\n");
}
{ /* Interrupt Moderate Timer */
struct atl1_option opt = {
.type = range_option,
.name = "Interrupt Moderator Timer",
.err = "using default of "
__MODULE_STRING(DEFAULT_INT_MOD_CNT),
.def = DEFAULT_INT_MOD_CNT,
.arg = {.r =
{.min = MIN_INT_MOD_CNT,.max = MAX_INT_MOD_CNT}}
};
int val;
if (num_int_mod_timer > bd) {
val = int_mod_timer[bd];
atl1_validate_option(&val, &opt, pdev);
adapter->imt = (u16) val;
} else
adapter->imt = (u16) (opt.def);
}
{ /* Flash Vendor */
struct atl1_option opt = {
.type = range_option,
.name = "SPI Flash Vendor",
.err = "using default of "
__MODULE_STRING(FLASH_VENDOR_DEFAULT),
.def = FLASH_VENDOR_DEFAULT,
.arg = {.r =
{.min = FLASH_VENDOR_MIN,.max =
FLASH_VENDOR_MAX}}
};
int val;
if (num_flash_vendor > bd) {
val = flash_vendor[bd];
atl1_validate_option(&val, &opt, pdev);
adapter->hw.flash_vendor = (u8) val;
} else
adapter->hw.flash_vendor = (u8) (opt.def);
}
}

@ -0,0 +1 @@
obj-$(CONFIG_ATL1) += atl1.o

Diff not shown because the file is too large.

796
drivers/net/atlx/atl1.h Normal file

@ -0,0 +1,796 @@
/*
* Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
* Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
* Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef ATL1_H
#define ATL1_H
#include <linux/compiler.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/mii.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "atlx.h"
#define ATLX_DRIVER_NAME "atl1"
MODULE_DESCRIPTION("Atheros L1 Gigabit Ethernet Driver");
#define atlx_adapter atl1_adapter
#define atlx_check_for_link atl1_check_for_link
#define atlx_check_link atl1_check_link
#define atlx_hash_mc_addr atl1_hash_mc_addr
#define atlx_hash_set atl1_hash_set
#define atlx_hw atl1_hw
#define atlx_mii_ioctl atl1_mii_ioctl
#define atlx_read_phy_reg atl1_read_phy_reg
#define atlx_set_mac atl1_set_mac
#define atlx_set_mac_addr atl1_set_mac_addr
struct atl1_adapter;
struct atl1_hw;
/* function prototypes needed by multiple files */
u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr);
void atl1_hash_set(struct atl1_hw *hw, u32 hash_value);
s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data);
void atl1_set_mac_addr(struct atl1_hw *hw);
static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
int cmd);
static u32 atl1_check_link(struct atl1_adapter *adapter);
extern const struct ethtool_ops atl1_ethtool_ops;
/* hardware definitions specific to L1 */
/* Block IDLE Status Register */
#define IDLE_STATUS_RXMAC 0x1
#define IDLE_STATUS_TXMAC 0x2
#define IDLE_STATUS_RXQ 0x4
#define IDLE_STATUS_TXQ 0x8
#define IDLE_STATUS_DMAR 0x10
#define IDLE_STATUS_DMAW 0x20
#define IDLE_STATUS_SMB 0x40
#define IDLE_STATUS_CMB 0x80
/* MDIO Control Register */
#define MDIO_WAIT_TIMES 30
/* MAC Control Register */
#define MAC_CTRL_TX_PAUSE 0x10000
#define MAC_CTRL_SCNT 0x20000
#define MAC_CTRL_SRST_TX 0x40000
#define MAC_CTRL_TX_SIMURST 0x80000
#define MAC_CTRL_SPEED_SHIFT 20
#define MAC_CTRL_SPEED_MASK 0x300000
#define MAC_CTRL_SPEED_1000 0x2
#define MAC_CTRL_SPEED_10_100 0x1
#define MAC_CTRL_DBG_TX_BKPRESURE 0x400000
#define MAC_CTRL_TX_HUGE 0x800000
#define MAC_CTRL_RX_CHKSUM_EN 0x1000000
#define MAC_CTRL_DBG 0x8000000
/* Wake-On-Lan control register */
#define WOL_CLK_SWITCH_EN 0x8000
#define WOL_PT5_EN 0x200000
#define WOL_PT6_EN 0x400000
#define WOL_PT5_MATCH 0x8000000
#define WOL_PT6_MATCH 0x10000000
/* WOL Length ( 2 DWORD ) */
#define REG_WOL_PATTERN_LEN 0x14A4
#define WOL_PT_LEN_MASK 0x7F
#define WOL_PT0_LEN_SHIFT 0
#define WOL_PT1_LEN_SHIFT 8
#define WOL_PT2_LEN_SHIFT 16
#define WOL_PT3_LEN_SHIFT 24
#define WOL_PT4_LEN_SHIFT 0
#define WOL_PT5_LEN_SHIFT 8
#define WOL_PT6_LEN_SHIFT 16
/* Internal SRAM Partition Registers, low 32 bits */
#define REG_SRAM_RFD_LEN 0x1504
#define REG_SRAM_RRD_ADDR 0x1508
#define REG_SRAM_RRD_LEN 0x150C
#define REG_SRAM_TPD_ADDR 0x1510
#define REG_SRAM_TPD_LEN 0x1514
#define REG_SRAM_TRD_ADDR 0x1518
#define REG_SRAM_TRD_LEN 0x151C
#define REG_SRAM_RXF_ADDR 0x1520
#define REG_SRAM_RXF_LEN 0x1524
#define REG_SRAM_TXF_ADDR 0x1528
#define REG_SRAM_TXF_LEN 0x152C
#define REG_SRAM_TCPH_PATH_ADDR 0x1530
#define SRAM_TCPH_ADDR_MASK 0xFFF
#define SRAM_TCPH_ADDR_SHIFT 0
#define SRAM_PATH_ADDR_MASK 0xFFF
#define SRAM_PATH_ADDR_SHIFT 16
/* Load Ptr Register */
#define REG_LOAD_PTR 0x1534
/* Descriptor Control registers, low 32 bits */
#define REG_DESC_RFD_ADDR_LO 0x1544
#define REG_DESC_RRD_ADDR_LO 0x1548
#define REG_DESC_TPD_ADDR_LO 0x154C
#define REG_DESC_CMB_ADDR_LO 0x1550
#define REG_DESC_SMB_ADDR_LO 0x1554
#define REG_DESC_RFD_RRD_RING_SIZE 0x1558
#define DESC_RFD_RING_SIZE_MASK 0x7FF
#define DESC_RFD_RING_SIZE_SHIFT 0
#define DESC_RRD_RING_SIZE_MASK 0x7FF
#define DESC_RRD_RING_SIZE_SHIFT 16
#define REG_DESC_TPD_RING_SIZE 0x155C
#define DESC_TPD_RING_SIZE_MASK 0x3FF
#define DESC_TPD_RING_SIZE_SHIFT 0
/* TXQ Control Register */
#define REG_TXQ_CTRL 0x1580
#define TXQ_CTRL_TPD_BURST_NUM_SHIFT 0
#define TXQ_CTRL_TPD_BURST_NUM_MASK 0x1F
#define TXQ_CTRL_EN 0x20
#define TXQ_CTRL_ENH_MODE 0x40
#define TXQ_CTRL_TPD_FETCH_TH_SHIFT 8
#define TXQ_CTRL_TPD_FETCH_TH_MASK 0x3F
#define TXQ_CTRL_TXF_BURST_NUM_SHIFT 16
#define TXQ_CTRL_TXF_BURST_NUM_MASK 0xFFFF
/* Jumbo packet Threshold for task offload */
#define REG_TX_JUMBO_TASK_TH_TPD_IPG 0x1584
#define TX_JUMBO_TASK_TH_MASK 0x7FF
#define TX_JUMBO_TASK_TH_SHIFT 0
#define TX_TPD_MIN_IPG_MASK 0x1F
#define TX_TPD_MIN_IPG_SHIFT 16
/* RXQ Control Register */
#define REG_RXQ_CTRL 0x15A0
#define RXQ_CTRL_RFD_BURST_NUM_SHIFT 0
#define RXQ_CTRL_RFD_BURST_NUM_MASK 0xFF
#define RXQ_CTRL_RRD_BURST_THRESH_SHIFT 8
#define RXQ_CTRL_RRD_BURST_THRESH_MASK 0xFF
#define RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT 16
#define RXQ_CTRL_RFD_PREF_MIN_IPG_MASK 0x1F
#define RXQ_CTRL_CUT_THRU_EN 0x40000000
#define RXQ_CTRL_EN 0x80000000
/* Rx jumbo packet threshold and rrd retirement timer */
#define REG_RXQ_JMBOSZ_RRDTIM 0x15A4
#define RXQ_JMBOSZ_TH_MASK 0x7FF
#define RXQ_JMBOSZ_TH_SHIFT 0
#define RXQ_JMBO_LKAH_MASK 0xF
#define RXQ_JMBO_LKAH_SHIFT 11
#define RXQ_RRD_TIMER_MASK 0xFFFF
#define RXQ_RRD_TIMER_SHIFT 16
/* RFD flow control register */
#define REG_RXQ_RXF_PAUSE_THRESH 0x15A8
#define RXQ_RXF_PAUSE_TH_HI_SHIFT 16
#define RXQ_RXF_PAUSE_TH_HI_MASK 0xFFF
#define RXQ_RXF_PAUSE_TH_LO_SHIFT 0
#define RXQ_RXF_PAUSE_TH_LO_MASK 0xFFF
/* RRD flow control register */
#define REG_RXQ_RRD_PAUSE_THRESH 0x15AC
#define RXQ_RRD_PAUSE_TH_HI_SHIFT 0
#define RXQ_RRD_PAUSE_TH_HI_MASK 0xFFF
#define RXQ_RRD_PAUSE_TH_LO_SHIFT 16
#define RXQ_RRD_PAUSE_TH_LO_MASK 0xFFF
/* DMA Engine Control Register */
#define REG_DMA_CTRL 0x15C0
#define DMA_CTRL_DMAR_IN_ORDER 0x1
#define DMA_CTRL_DMAR_ENH_ORDER 0x2
#define DMA_CTRL_DMAR_OUT_ORDER 0x4
#define DMA_CTRL_RCB_VALUE 0x8
#define DMA_CTRL_DMAR_BURST_LEN_SHIFT 4
#define DMA_CTRL_DMAR_BURST_LEN_MASK 7
#define DMA_CTRL_DMAW_BURST_LEN_SHIFT 7
#define DMA_CTRL_DMAW_BURST_LEN_MASK 7
#define DMA_CTRL_DMAR_EN 0x400
#define DMA_CTRL_DMAW_EN 0x800
/* CMB/SMB Control Register */
#define REG_CSMB_CTRL 0x15D0
#define CSMB_CTRL_CMB_NOW 1
#define CSMB_CTRL_SMB_NOW 2
#define CSMB_CTRL_CMB_EN 4
#define CSMB_CTRL_SMB_EN 8
/* CMB DMA Write Threshold Register */
#define REG_CMB_WRITE_TH 0x15D4
#define CMB_RRD_TH_SHIFT 0
#define CMB_RRD_TH_MASK 0x7FF
#define CMB_TPD_TH_SHIFT 16
#define CMB_TPD_TH_MASK 0x7FF
/* RX/TX count-down timer to trigger CMB-write. 2us resolution. */
#define REG_CMB_WRITE_TIMER 0x15D8
#define CMB_RX_TM_SHIFT 0
#define CMB_RX_TM_MASK 0xFFFF
#define CMB_TX_TM_SHIFT 16
#define CMB_TX_TM_MASK 0xFFFF
/* Number of packet received since last CMB write */
#define REG_CMB_RX_PKT_CNT 0x15DC
/* Number of packet transmitted since last CMB write */
#define REG_CMB_TX_PKT_CNT 0x15E0
/* SMB auto DMA timer register */
#define REG_SMB_TIMER 0x15E4
/* Mailbox Register */
#define REG_MAILBOX 0x15F0
#define MB_RFD_PROD_INDX_SHIFT 0
#define MB_RFD_PROD_INDX_MASK 0x7FF
#define MB_RRD_CONS_INDX_SHIFT 11
#define MB_RRD_CONS_INDX_MASK 0x7FF
#define MB_TPD_PROD_INDX_SHIFT 22
#define MB_TPD_PROD_INDX_MASK 0x3FF
/* Interrupt Status Register */
#define ISR_SMB 0x1
#define ISR_TIMER 0x2
#define ISR_MANUAL 0x4
#define ISR_RXF_OV 0x8
#define ISR_RFD_UNRUN 0x10
#define ISR_RRD_OV 0x20
#define ISR_TXF_UNRUN 0x40
#define ISR_LINK 0x80
#define ISR_HOST_RFD_UNRUN 0x100
#define ISR_HOST_RRD_OV 0x200
#define ISR_DMAR_TO_RST 0x400
#define ISR_DMAW_TO_RST 0x800
#define ISR_GPHY 0x1000
#define ISR_RX_PKT 0x10000
#define ISR_TX_PKT 0x20000
#define ISR_TX_DMA 0x40000
#define ISR_RX_DMA 0x80000
#define ISR_CMB_RX 0x100000
#define ISR_CMB_TX 0x200000
#define ISR_MAC_RX 0x400000
#define ISR_MAC_TX 0x800000
#define ISR_DIS_SMB 0x20000000
#define ISR_DIS_DMA 0x40000000
/* Normal Interrupt mask */
#define IMR_NORMAL_MASK (\
ISR_SMB |\
ISR_GPHY |\
ISR_PHY_LINKDOWN|\
ISR_DMAR_TO_RST |\
ISR_DMAW_TO_RST |\
ISR_CMB_TX |\
ISR_CMB_RX)
/* Debug Interrupt Mask (enable all interrupt) */
#define IMR_DEBUG_MASK (\
ISR_SMB |\
ISR_TIMER |\
ISR_MANUAL |\
ISR_RXF_OV |\
ISR_RFD_UNRUN |\
ISR_RRD_OV |\
ISR_TXF_UNRUN |\
ISR_LINK |\
ISR_CMB_TX |\
ISR_CMB_RX |\
ISR_RX_PKT |\
ISR_TX_PKT |\
ISR_MAC_RX |\
ISR_MAC_TX)
#define MEDIA_TYPE_1000M_FULL 1
#define MEDIA_TYPE_100M_FULL 2
#define MEDIA_TYPE_100M_HALF 3
#define MEDIA_TYPE_10M_FULL 4
#define MEDIA_TYPE_10M_HALF 5
#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F /* All but 1000-Half */
#define MAX_JUMBO_FRAME_SIZE 10240
#define ATL1_EEDUMP_LEN 48
/* Statistics counters collected by the MAC */
struct stats_msg_block {
/* rx */
u32 rx_ok; /* good RX packets */
u32 rx_bcast; /* good RX broadcast packets */
u32 rx_mcast; /* good RX multicast packets */
u32 rx_pause; /* RX pause frames */
u32 rx_ctrl; /* RX control packets other than pause frames */
u32 rx_fcs_err; /* RX packets with bad FCS */
u32 rx_len_err; /* RX packets with length != actual size */
u32 rx_byte_cnt; /* good bytes received. FCS is NOT included */
u32 rx_runt; /* RX packets < 64 bytes with good FCS */
u32 rx_frag; /* RX packets < 64 bytes with bad FCS */
u32 rx_sz_64; /* 64 byte RX packets */
u32 rx_sz_65_127;
u32 rx_sz_128_255;
u32 rx_sz_256_511;
u32 rx_sz_512_1023;
u32 rx_sz_1024_1518;
u32 rx_sz_1519_max; /* 1519 byte to MTU RX packets */
u32 rx_sz_ov; /* truncated RX packets > MTU */
u32 rx_rxf_ov; /* frames dropped due to RX FIFO overflow */
u32 rx_rrd_ov; /* frames dropped due to RRD overflow */
u32 rx_align_err; /* alignment errors */
u32 rx_bcast_byte_cnt; /* RX broadcast bytes, excluding FCS */
u32 rx_mcast_byte_cnt; /* RX multicast bytes, excluding FCS */
u32 rx_err_addr; /* packets dropped due to address filtering */
/* tx */
u32 tx_ok; /* good TX packets */
u32 tx_bcast; /* good TX broadcast packets */
u32 tx_mcast; /* good TX multicast packets */
u32 tx_pause; /* TX pause frames */
u32 tx_exc_defer; /* TX packets deferred excessively */
u32 tx_ctrl; /* TX control frames, excluding pause frames */
u32 tx_defer; /* TX packets deferred */
u32 tx_byte_cnt; /* bytes transmitted, FCS is NOT included */
u32 tx_sz_64; /* 64 byte TX packets */
u32 tx_sz_65_127;
u32 tx_sz_128_255;
u32 tx_sz_256_511;
u32 tx_sz_512_1023;
u32 tx_sz_1024_1518;
u32 tx_sz_1519_max; /* 1519 byte to MTU TX packets */
u32 tx_1_col; /* packets TX after a single collision */
u32 tx_2_col; /* packets TX after multiple collisions */
u32 tx_late_col; /* TX packets with late collisions */
u32 tx_abort_col; /* TX packets aborted w/excessive collisions */
u32 tx_underrun; /* TX packets aborted due to TX FIFO underrun
* or TRD FIFO underrun */
u32 tx_rd_eop; /* reads beyond the EOP into the next frame
* when TRD was not written timely */
u32 tx_len_err; /* TX packets where length != actual size */
u32 tx_trunc; /* TX packets truncated due to size > MTU */
u32 tx_bcast_byte; /* broadcast bytes transmitted, excluding FCS */
u32 tx_mcast_byte; /* multicast bytes transmitted, excluding FCS */
u32 smb_updated; /* 1: SMB Updated. This is used by software to
* indicate the statistics update. Software
* should clear this bit after retrieving the
* statistics information. */
};
/* Coalescing Message Block */
struct coals_msg_block {
u32 int_stats; /* interrupt status */
u16 rrd_prod_idx; /* TRD Producer Index. */
u16 rfd_cons_idx; /* RFD Consumer Index. */
u16 update; /* Selene sets this bit every time it DMAs the
* CMB to host memory. Software should clear
* this bit when CMB info is processed. */
u16 tpd_cons_idx; /* TPD Consumer Index. */
};
/* RRD descriptor */
struct rx_return_desc {
u8 num_buf; /* Number of RFD buffers used by the received packet */
u8 resved;
u16 buf_indx; /* RFD Index of the first buffer */
union {
u32 valid;
struct {
u16 rx_chksum;
u16 pkt_size;
} xsum_sz;
} xsz;
u16 pkt_flg; /* Packet flags */
u16 err_flg; /* Error flags */
u16 resved2;
u16 vlan_tag; /* VLAN TAG */
};
#define PACKET_FLAG_ETH_TYPE 0x0080
#define PACKET_FLAG_VLAN_INS 0x0100
#define PACKET_FLAG_ERR 0x0200
#define PACKET_FLAG_IPV4 0x0400
#define PACKET_FLAG_UDP 0x0800
#define PACKET_FLAG_TCP 0x1000
#define PACKET_FLAG_BCAST 0x2000
#define PACKET_FLAG_MCAST 0x4000
#define PACKET_FLAG_PAUSE 0x8000
#define ERR_FLAG_CRC 0x0001
#define ERR_FLAG_CODE 0x0002
#define ERR_FLAG_DRIBBLE 0x0004
#define ERR_FLAG_RUNT 0x0008
#define ERR_FLAG_OV 0x0010
#define ERR_FLAG_TRUNC 0x0020
#define ERR_FLAG_IP_CHKSUM 0x0040
#define ERR_FLAG_L4_CHKSUM 0x0080
#define ERR_FLAG_LEN 0x0100
#define ERR_FLAG_DES_ADDR 0x0200
/* RFD descriptor */
struct rx_free_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
__le16 buf_len; /* Size of the receive buffer in host memory */
u16 coalese; /* Update consumer index to host after the
* reception of this frame */
/* __attribute__ ((packed)) is required */
} __attribute__ ((packed));
/*
* The L1 transmit packet descriptor is comprised of four 32-bit words.
*
* 31 0
* +---------------------------------------+
* | Word 0: Buffer addr lo |
* +---------------------------------------+
* | Word 1: Buffer addr hi |
* +---------------------------------------+
* | Word 2 |
* +---------------------------------------+
* | Word 3 |
* +---------------------------------------+
*
* Words 0 and 1 combine to form a 64-bit buffer address.
*
* Word 2 is self explanatory in the #define block below.
*
* Word 3 has two forms, depending upon the state of bits 3 and 4.
* If bits 3 and 4 are both zero, then bits 14:31 are unused by the
* hardware. Otherwise, if either bit 3 or 4 is set, the definition
* of bits 14:31 vary according to the following depiction.
*
* 0 End of packet 0 End of packet
* 1 Coalesce 1 Coalesce
* 2 Insert VLAN tag 2 Insert VLAN tag
* 3 Custom csum enable = 0 3 Custom csum enable = 1
* 4 Segment enable = 1 4 Segment enable = 0
* 5 Generate IP checksum 5 Generate IP checksum
* 6 Generate TCP checksum 6 Generate TCP checksum
* 7 Generate UDP checksum 7 Generate UDP checksum
* 8 VLAN tagged 8 VLAN tagged
* 9 Ethernet frame type 9 Ethernet frame type
* 10-+ 10-+
* 11 | IP hdr length (10:13) 11 | IP hdr length (10:13)
* 12 | (num 32-bit words) 12 | (num 32-bit words)
* 13-+ 13-+
* 14-+ 14 Unused
* 15 | TCP hdr length (14:17) 15 Unused
* 16 | (num 32-bit words) 16-+
* 17-+ 17 |
* 18 Header TPD flag 18 |
* 19-+ 19 | Payload offset
* 20 | 20 | (16:23)
* 21 | 21 |
* 22 | 22 |
* 23 | 23-+
* 24 | 24-+
* 25 | MSS (19:31) 25 |
* 26 | 26 |
* 27 | 27 | Custom csum offset
* 28 | 28 | (24:31)
* 29 | 29 |
* 30 | 30 |
* 31-+ 31-+
*/
/* tpd word 2 */
#define TPD_BUFLEN_MASK 0x3FFF
#define TPD_BUFLEN_SHIFT 0
#define TPD_DMAINT_MASK 0x0001
#define TPD_DMAINT_SHIFT 14
#define TPD_PKTNT_MASK 0x0001
#define TPD_PKTINT_SHIFT 15
#define TPD_VLANTAG_MASK 0xFFFF
#define TPD_VLAN_SHIFT 16
/* tpd word 3 bits 0:13 */
#define TPD_EOP_MASK 0x0001
#define TPD_EOP_SHIFT 0
#define TPD_COALESCE_MASK 0x0001
#define TPD_COALESCE_SHIFT 1
#define TPD_INS_VL_TAG_MASK 0x0001
#define TPD_INS_VL_TAG_SHIFT 2
#define TPD_CUST_CSUM_EN_MASK 0x0001
#define TPD_CUST_CSUM_EN_SHIFT 3
#define TPD_SEGMENT_EN_MASK 0x0001
#define TPD_SEGMENT_EN_SHIFT 4
#define TPD_IP_CSUM_MASK 0x0001
#define TPD_IP_CSUM_SHIFT 5
#define TPD_TCP_CSUM_MASK 0x0001
#define TPD_TCP_CSUM_SHIFT 6
#define TPD_UDP_CSUM_MASK 0x0001
#define TPD_UDP_CSUM_SHIFT 7
#define TPD_VL_TAGGED_MASK 0x0001
#define TPD_VL_TAGGED_SHIFT 8
#define TPD_ETHTYPE_MASK 0x0001
#define TPD_ETHTYPE_SHIFT 9
#define TPD_IPHL_MASK 0x000F
#define TPD_IPHL_SHIFT 10
/* tpd word 3 bits 14:31 if segment enabled */
#define TPD_TCPHDRLEN_MASK 0x000F
#define TPD_TCPHDRLEN_SHIFT 14
#define TPD_HDRFLAG_MASK 0x0001
#define TPD_HDRFLAG_SHIFT 18
#define TPD_MSS_MASK 0x1FFF
#define TPD_MSS_SHIFT 19
/* tpd word 3 bits 16:31 if custom csum enabled */
#define TPD_PLOADOFFSET_MASK 0x00FF
#define TPD_PLOADOFFSET_SHIFT 16
#define TPD_CCSUMOFFSET_MASK 0x00FF
#define TPD_CCSUMOFFSET_SHIFT 24
struct tx_packet_desc {
__le64 buffer_addr;
__le32 word2;
__le32 word3;
};
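/*
 * Illustrative sketch, not part of the original header: composing the two
 * control words of a segmentation-offload TPD from the masks and shifts
 * above.  The helper name and its arguments are hypothetical.
 */
#if 0
static inline void atl1_sketch_fill_tso_tpd(struct tx_packet_desc *tpd,
					    u16 buf_len, u16 mss,
					    u8 ip_hdr_dwords, u8 tcp_hdr_dwords)
{
	u32 word2 = 0, word3 = 0;

	word2 |= (buf_len & TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;
	word3 |= TPD_SEGMENT_EN_MASK << TPD_SEGMENT_EN_SHIFT;	/* segment enable = 1 */
	word3 |= (ip_hdr_dwords & TPD_IPHL_MASK) << TPD_IPHL_SHIFT;
	word3 |= (tcp_hdr_dwords & TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
	word3 |= (mss & TPD_MSS_MASK) << TPD_MSS_SHIFT;

	tpd->word2 = cpu_to_le32(word2);
	tpd->word3 = cpu_to_le32(word3);
}
#endif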
/* DMA Order Settings */
enum atl1_dma_order {
atl1_dma_ord_in = 1,
atl1_dma_ord_enh = 2,
atl1_dma_ord_out = 4
};
enum atl1_dma_rcb {
atl1_rcb_64 = 0,
atl1_rcb_128 = 1
};
enum atl1_dma_req_block {
atl1_dma_req_128 = 0,
atl1_dma_req_256 = 1,
atl1_dma_req_512 = 2,
atl1_dma_req_1024 = 3,
atl1_dma_req_2048 = 4,
atl1_dma_req_4096 = 5
};
#define ATL1_MAX_INTR 3
#define ATL1_MAX_TX_BUF_LEN 0x3000 /* 12288 bytes */
#define ATL1_DEFAULT_TPD 256
#define ATL1_MAX_TPD 1024
#define ATL1_MIN_TPD 64
#define ATL1_DEFAULT_RFD 512
#define ATL1_MIN_RFD 128
#define ATL1_MAX_RFD 2048
#define ATL1_REG_COUNT 1538
#define ATL1_GET_DESC(R, i, type) (&(((type *)((R)->desc))[i]))
#define ATL1_RFD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_free_desc)
#define ATL1_TPD_DESC(R, i) ATL1_GET_DESC(R, i, struct tx_packet_desc)
#define ATL1_RRD_DESC(R, i) ATL1_GET_DESC(R, i, struct rx_return_desc)
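/*
 * Usage sketch, not part of the original header: the helpers above index
 * directly into a ring's descriptor memory ('adapter' and 'i' are
 * hypothetical here).
 */
#if 0
	struct tx_packet_desc *tpd = ATL1_TPD_DESC(&adapter->tpd_ring, i);
	struct rx_free_desc *rfd = ATL1_RFD_DESC(&adapter->rfd_ring, i);
#endif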
/*
* atl1_ring_header represents a single, contiguous block of DMA space
* mapped for the three descriptor rings (tpd, rfd, rrd) and the two
* message blocks (cmb, smb) described below
*/
struct atl1_ring_header {
void *desc; /* virtual address */
dma_addr_t dma; /* physical address*/
unsigned int size; /* length in bytes */
};
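/*
 * Sizing sketch, not part of the original header: the single DMA block is
 * typically dimensioned to hold all three rings plus the two message blocks;
 * the exact padding is a driver detail and is shown here only as an
 * assumption ('*_count' and 'alignment_padding' are hypothetical).
 */
#if 0
	ring_header->size = tpd_count * sizeof(struct tx_packet_desc) +
			    rfd_count * sizeof(struct rx_free_desc) +
			    rrd_count * sizeof(struct rx_return_desc) +
			    sizeof(struct coals_msg_block) +
			    sizeof(struct stats_msg_block) +
			    alignment_padding;	/* hypothetical */
#endif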
/*
* atl1_buffer is wrapper around a pointer to a socket buffer
* so a DMA handle can be stored along with the skb
*/
struct atl1_buffer {
struct sk_buff *skb; /* socket buffer */
u16 length; /* rx buffer length */
u16 alloced; /* 1 if skb allocated */
dma_addr_t dma;
};
/* transmit packet descriptor (tpd) ring */
struct atl1_tpd_ring {
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
u16 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
u16 hw_idx; /* hardware index */
atomic_t next_to_clean;
atomic_t next_to_use;
struct atl1_buffer *buffer_info;
};
/* receive free descriptor (rfd) ring */
struct atl1_rfd_ring {
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
u16 size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
atomic_t next_to_use;
u16 next_to_clean;
struct atl1_buffer *buffer_info;
};
/* receive return descriptor (rrd) ring */
struct atl1_rrd_ring {
void *desc; /* descriptor ring virtual address */
dma_addr_t dma; /* descriptor ring physical address */
unsigned int size; /* descriptor ring length in bytes */
u16 count; /* number of descriptors in the ring */
u16 next_to_use;
atomic_t next_to_clean;
};
/* coalescing message block (cmb) */
struct atl1_cmb {
struct coals_msg_block *cmb;
dma_addr_t dma;
};
/* statistics message block (smb) */
struct atl1_smb {
struct stats_msg_block *smb;
dma_addr_t dma;
};
/* Statistics counters */
struct atl1_sft_stats {
u64 rx_packets;
u64 tx_packets;
u64 rx_bytes;
u64 tx_bytes;
u64 multicast;
u64 collisions;
u64 rx_errors;
u64 rx_length_errors;
u64 rx_crc_errors;
u64 rx_frame_errors;
u64 rx_fifo_errors;
u64 rx_missed_errors;
u64 tx_errors;
u64 tx_fifo_errors;
u64 tx_aborted_errors;
u64 tx_window_errors;
u64 tx_carrier_errors;
u64 tx_pause; /* TX pause frames */
u64 excecol; /* TX packets w/ excessive collisions */
u64 deffer; /* TX packets deferred */
u64 scc; /* packets TX after a single collision */
u64 mcc; /* packets TX after multiple collisions */
u64 latecol; /* TX packets w/ late collisions */
u64 tx_underun; /* TX packets aborted due to TX FIFO underrun
* or TRD FIFO underrun */
u64 tx_trunc; /* TX packets truncated due to size > MTU */
u64 rx_pause; /* num Pause packets received. */
u64 rx_rrd_ov;
u64 rx_trunc;
};
/* hardware structure */
struct atl1_hw {
u8 __iomem *hw_addr;
struct atl1_adapter *back;
enum atl1_dma_order dma_ord;
enum atl1_dma_rcb rcb_value;
enum atl1_dma_req_block dmar_block;
enum atl1_dma_req_block dmaw_block;
u8 preamble_len;
u8 max_retry;
u8 jam_ipg; /* IPG to start JAM for collision based flow
* control in half-duplex mode. In units of
* 8-bit time */
u8 ipgt; /* Desired back to back inter-packet gap.
* The default is 96-bit time */
u8 min_ifg; /* Minimum number of IFG to enforce in between
* receive frames. Frame gap below such IFG
* is dropped */
u8 ipgr1; /* 64bit Carrier-Sense window */
u8 ipgr2; /* 96-bit IPG window */
u8 tpd_burst; /* Number of TPD to prefetch in cache-aligned
* burst. Each TPD is 16 bytes long */
u8 rfd_burst; /* Number of RFD to prefetch in cache-aligned
* burst. Each RFD is 12 bytes long */
u8 rfd_fetch_gap;
u8 rrd_burst; /* Threshold number of RRDs that can be retired
* in a burst. Each RRD is 16 bytes long */
u8 tpd_fetch_th;
u8 tpd_fetch_gap;
u16 tx_jumbo_task_th;
u16 txf_burst; /* Number of data bytes to read in a cache-
* aligned burst. Each SRAM entry is 8 bytes */
u16 rx_jumbo_th; /* Jumbo packet size for non-VLAN packet. VLAN
* packets should add 4 bytes */
u16 rx_jumbo_lkah;
u16 rrd_ret_timer; /* RRD retirement timer. Decrement by 1 after
* every 512ns passes. */
u16 lcol; /* Collision Window */
u16 cmb_tpd;
u16 cmb_rrd;
u16 cmb_rx_timer;
u16 cmb_tx_timer;
u32 smb_timer;
u16 media_type;
u16 autoneg_advertised;
u16 mii_autoneg_adv_reg;
u16 mii_1000t_ctrl_reg;
u32 max_frame_size;
u32 min_frame_size;
u16 dev_rev;
/* spi flash */
u8 flash_vendor;
u8 mac_addr[ETH_ALEN];
u8 perm_mac_addr[ETH_ALEN];
bool phy_configured;
};
struct atl1_adapter {
struct net_device *netdev;
struct pci_dev *pdev;
struct net_device_stats net_stats;
struct atl1_sft_stats soft_stats;
struct vlan_group *vlgrp;
u32 rx_buffer_len;
u32 wol;
u16 link_speed;
u16 link_duplex;
spinlock_t lock;
struct work_struct tx_timeout_task;
struct work_struct link_chg_task;
struct work_struct pcie_dma_to_rst_task;
struct timer_list watchdog_timer;
struct timer_list phy_config_timer;
bool phy_timer_pending;
/* all descriptor rings' memory */
struct atl1_ring_header ring_header;
/* TX */
struct atl1_tpd_ring tpd_ring;
spinlock_t mb_lock;
/* RX */
struct atl1_rfd_ring rfd_ring;
struct atl1_rrd_ring rrd_ring;
u64 hw_csum_err;
u64 hw_csum_good;
u32 msg_enable;
u16 imt; /* interrupt moderator timer (2us resolution) */
u16 ict; /* interrupt clear timer (2us resolution) */
struct mii_if_info mii; /* MII interface info */
u32 bd_number; /* board number */
bool pci_using_64;
struct atl1_hw hw;
struct atl1_smb smb;
struct atl1_cmb cmb;
};
#endif /* ATL1_H */

433
drivers/net/atlx/atlx.c Normal file

@ -0,0 +1,433 @@
/* atlx.c -- common functions for Attansic network drivers
*
* Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
* Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
* Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
* Copyright(c) 2007 Atheros Corporation. All rights reserved.
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/* Including this file like a header is a temporary hack, I promise. -- CHS */
#ifndef ATLX_C
#define ATLX_C
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "atlx.h"
static struct atlx_spi_flash_dev flash_table[] = {
/* MFR_NAME WRSR READ PRGM WREN WRDI RDSR RDID SEC_ERS CHIP_ERS */
{"Atmel", 0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52, 0x62},
{"SST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0x90, 0x20, 0x60},
{"ST", 0x01, 0x03, 0x02, 0x06, 0x04, 0x05, 0xAB, 0xD8, 0xC7},
};
static int atlx_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
switch (cmd) {
case SIOCGMIIPHY:
case SIOCGMIIREG:
case SIOCSMIIREG:
return atlx_mii_ioctl(netdev, ifr, cmd);
default:
return -EOPNOTSUPP;
}
}
/*
* atlx_set_mac - Change the Ethernet Address of the NIC
* @netdev: network interface device structure
* @p: pointer to an address structure
*
* Returns 0 on success, negative on failure
*/
static int atlx_set_mac(struct net_device *netdev, void *p)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
struct sockaddr *addr = p;
if (netif_running(netdev))
return -EBUSY;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(adapter->hw.mac_addr, addr->sa_data, netdev->addr_len);
atlx_set_mac_addr(&adapter->hw);
return 0;
}
static void atlx_check_for_link(struct atlx_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
u16 phy_data = 0;
spin_lock(&adapter->lock);
adapter->phy_timer_pending = false;
atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
atlx_read_phy_reg(&adapter->hw, MII_BMSR, &phy_data);
spin_unlock(&adapter->lock);
/* notify upper layer link down ASAP */
if (!(phy_data & BMSR_LSTATUS)) {
/* Link Down */
if (netif_carrier_ok(netdev)) {
/* old link state: Up */
dev_info(&adapter->pdev->dev, "%s link is down\n",
netdev->name);
adapter->link_speed = SPEED_0;
netif_carrier_off(netdev);
netif_stop_queue(netdev);
}
}
schedule_work(&adapter->link_chg_task);
}
/*
* atlx_set_multi - Multicast and Promiscuous mode set
* @netdev: network interface device structure
*
* The set_multi entry point is called whenever the multicast address
* list or the network interface flags are updated. This routine is
* responsible for configuring the hardware for proper multicast,
* promiscuous mode, and all-multi behavior.
*/
static void atlx_set_multi(struct net_device *netdev)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
struct atlx_hw *hw = &adapter->hw;
struct dev_mc_list *mc_ptr;
u32 rctl;
u32 hash_value;
/* Check for Promiscuous and All Multicast modes */
rctl = ioread32(hw->hw_addr + REG_MAC_CTRL);
if (netdev->flags & IFF_PROMISC)
rctl |= MAC_CTRL_PROMIS_EN;
else if (netdev->flags & IFF_ALLMULTI) {
rctl |= MAC_CTRL_MC_ALL_EN;
rctl &= ~MAC_CTRL_PROMIS_EN;
} else
rctl &= ~(MAC_CTRL_PROMIS_EN | MAC_CTRL_MC_ALL_EN);
iowrite32(rctl, hw->hw_addr + REG_MAC_CTRL);
/* clear the old settings from the multicast hash table */
iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE);
iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2));
/* compute mc addresses' hash values, and put them into the hash table */
for (mc_ptr = netdev->mc_list; mc_ptr; mc_ptr = mc_ptr->next) {
hash_value = atlx_hash_mc_addr(hw, mc_ptr->dmi_addr);
atlx_hash_set(hw, hash_value);
}
}
/*
* atlx_irq_enable - Enable default interrupt generation settings
* @adapter: board private structure
*/
static void atlx_irq_enable(struct atlx_adapter *adapter)
{
iowrite32(IMR_NORMAL_MASK, adapter->hw.hw_addr + REG_IMR);
ioread32(adapter->hw.hw_addr + REG_IMR);
}
/*
* atlx_irq_disable - Mask off interrupt generation on the NIC
* @adapter: board private structure
*/
static void atlx_irq_disable(struct atlx_adapter *adapter)
{
iowrite32(0, adapter->hw.hw_addr + REG_IMR);
ioread32(adapter->hw.hw_addr + REG_IMR);
synchronize_irq(adapter->pdev->irq);
}
static void atlx_clear_phy_int(struct atlx_adapter *adapter)
{
u16 phy_data;
unsigned long flags;
spin_lock_irqsave(&adapter->lock, flags);
atlx_read_phy_reg(&adapter->hw, 19, &phy_data);
spin_unlock_irqrestore(&adapter->lock, flags);
}
/*
* atlx_get_stats - Get System Network Statistics
* @netdev: network interface device structure
*
* Returns the address of the device statistics structure.
* The statistics are actually updated from the timer callback.
*/
static struct net_device_stats *atlx_get_stats(struct net_device *netdev)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
return &adapter->net_stats;
}
/*
* atlx_tx_timeout - Respond to a Tx Hang
* @netdev: network interface device structure
*/
static void atlx_tx_timeout(struct net_device *netdev)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
/* Do the reset outside of interrupt context */
schedule_work(&adapter->tx_timeout_task);
}
/*
 * atlx_link_chg_task - deal with a link change event out of interrupt context
*/
static void atlx_link_chg_task(struct work_struct *work)
{
struct atlx_adapter *adapter;
unsigned long flags;
adapter = container_of(work, struct atlx_adapter, link_chg_task);
spin_lock_irqsave(&adapter->lock, flags);
atlx_check_link(adapter);
spin_unlock_irqrestore(&adapter->lock, flags);
}
static void atlx_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp)
{
struct atlx_adapter *adapter = netdev_priv(netdev);
unsigned long flags;
u32 ctrl;
spin_lock_irqsave(&adapter->lock, flags);
/* atlx_irq_disable(adapter); FIXME: confirm/remove */
adapter->vlgrp = grp;
if (grp) {
/* enable VLAN tag insert/strip */
ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
ctrl |= MAC_CTRL_RMV_VLAN;
iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
} else {
/* disable VLAN tag insert/strip */
ctrl = ioread32(adapter->hw.hw_addr + REG_MAC_CTRL);
ctrl &= ~MAC_CTRL_RMV_VLAN;
iowrite32(ctrl, adapter->hw.hw_addr + REG_MAC_CTRL);
}
/* atlx_irq_enable(adapter); FIXME */
spin_unlock_irqrestore(&adapter->lock, flags);
}
static void atlx_restore_vlan(struct atlx_adapter *adapter)
{
atlx_vlan_rx_register(adapter->netdev, adapter->vlgrp);
}
/*
* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
#define ATL1_MAX_NIC 4
#define OPTION_UNSET -1
#define OPTION_DISABLED 0
#define OPTION_ENABLED 1
#define ATL1_PARAM_INIT { [0 ... ATL1_MAX_NIC] = OPTION_UNSET }
/*
* Interrupt Moderate Timer in units of 2 us
*
* Valid Range: 10-65535
*
* Default Value: 100 (200us)
*/
static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
static int num_int_mod_timer;
module_param_array_named(int_mod_timer, int_mod_timer, int,
&num_int_mod_timer, 0);
MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer");
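/*
 * Worked example (illustrative only): the timer counts in 2 us units, so
 * the default of 100 corresponds to 100 * 2 us = 200 us between moderated
 * interrupts; a hypothetical "modprobe atl1 int_mod_timer=500" would give
 * 500 * 2 us = 1 ms.
 */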
/*
* flash_vendor
*
* Valid Range: 0-2
*
* 0 - Atmel
* 1 - SST
* 2 - ST
*
* Default Value: 0
*/
static int __devinitdata flash_vendor[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT;
static int num_flash_vendor;
module_param_array_named(flash_vendor, flash_vendor, int, &num_flash_vendor, 0);
MODULE_PARM_DESC(flash_vendor, "SPI flash vendor");
#define DEFAULT_INT_MOD_CNT 100 /* 200us */
#define MAX_INT_MOD_CNT 65000
#define MIN_INT_MOD_CNT 50
#define FLASH_VENDOR_DEFAULT 0
#define FLASH_VENDOR_MIN 0
#define FLASH_VENDOR_MAX 2
struct atl1_option {
enum { enable_option, range_option, list_option } type;
char *name;
char *err;
int def;
union {
struct { /* range_option info */
int min;
int max;
} r;
struct { /* list_option info */
int nr;
struct atl1_opt_list {
int i;
char *str;
} *p;
} l;
} arg;
};
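/*
 * Illustrative sketch (hypothetical, not used below): atl1_check_options()
 * only exercises range_option, but a list_option would pair each legal
 * value with a message; ARRAY_SIZE() from <linux/kernel.h> is assumed.
 */
#if 0	/* example only */
static struct atl1_opt_list example_fv_list[] = {
	{ 0, "Atmel SPI flash selected" },
	{ 1, "SST SPI flash selected" },
	{ 2, "ST SPI flash selected" },
};

static struct atl1_option example_fv_opt = {
	.type = list_option,
	.name = "SPI Flash Vendor",
	.err  = "using default of " __MODULE_STRING(FLASH_VENDOR_DEFAULT),
	.def  = FLASH_VENDOR_DEFAULT,
	.arg  = { .l = { .nr = ARRAY_SIZE(example_fv_list),
			 .p  = example_fv_list } },
};
#endif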
static int __devinit atl1_validate_option(int *value, struct atl1_option *opt,
struct pci_dev *pdev)
{
if (*value == OPTION_UNSET) {
*value = opt->def;
return 0;
}
switch (opt->type) {
case enable_option:
switch (*value) {
case OPTION_ENABLED:
dev_info(&pdev->dev, "%s enabled\n", opt->name);
return 0;
case OPTION_DISABLED:
dev_info(&pdev->dev, "%s disabled\n", opt->name);
return 0;
}
break;
case range_option:
if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
dev_info(&pdev->dev, "%s set to %i\n", opt->name,
*value);
return 0;
}
break;
case list_option:{
int i;
struct atl1_opt_list *ent;
for (i = 0; i < opt->arg.l.nr; i++) {
ent = &opt->arg.l.p[i];
if (*value == ent->i) {
if (ent->str[0] != '\0')
dev_info(&pdev->dev, "%s\n",
ent->str);
return 0;
}
}
}
break;
default:
break;
}
dev_info(&pdev->dev, "invalid %s specified (%i) %s\n",
opt->name, *value, opt->err);
*value = opt->def;
return -1;
}
/*
* atl1_check_options - Range Checking for Command Line Parameters
* @adapter: board private structure
*
* This routine checks all command line parameters for valid user
* input. If an invalid value is given, or if no user specified
* value exists, a default value is used. The final value is stored
* in a variable in the adapter structure.
*/
void __devinit atl1_check_options(struct atl1_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
int bd = adapter->bd_number;
if (bd >= ATL1_MAX_NIC) {
dev_notice(&pdev->dev, "no configuration for board#%i\n", bd);
dev_notice(&pdev->dev, "using defaults for all values\n");
}
{ /* Interrupt Moderate Timer */
struct atl1_option opt = {
.type = range_option,
.name = "Interrupt Moderator Timer",
.err = "using default of "
__MODULE_STRING(DEFAULT_INT_MOD_CNT),
.def = DEFAULT_INT_MOD_CNT,
.arg = {.r = {.min = MIN_INT_MOD_CNT,
.max = MAX_INT_MOD_CNT} }
};
int val;
if (num_int_mod_timer > bd) {
val = int_mod_timer[bd];
atl1_validate_option(&val, &opt, pdev);
adapter->imt = (u16) val;
} else
adapter->imt = (u16) (opt.def);
}
{ /* Flash Vendor */
struct atl1_option opt = {
.type = range_option,
.name = "SPI Flash Vendor",
.err = "using default of "
__MODULE_STRING(FLASH_VENDOR_DEFAULT),
			.def = FLASH_VENDOR_DEFAULT,
.arg = {.r = {.min = FLASH_VENDOR_MIN,
.max = FLASH_VENDOR_MAX} }
};
int val;
if (num_flash_vendor > bd) {
val = flash_vendor[bd];
atl1_validate_option(&val, &opt, pdev);
adapter->hw.flash_vendor = (u8) val;
} else
adapter->hw.flash_vendor = (u8) (opt.def);
}
}
#endif /* ATLX_C */

drivers/net/atlx/atlx.h (new file, 506 lines added)
View file

@ -0,0 +1,506 @@
/* atlx_hw.h -- common hardware definitions for Attansic network drivers
*
* Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved.
* Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com>
* Copyright(c) 2006 Jay Cliburn <jcliburn@gmail.com>
* Copyright(c) 2007 Atheros Corporation. All rights reserved.
*
* Derived from Intel e1000 driver
* Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#ifndef ATLX_H
#define ATLX_H
#include <linux/module.h>
#include <linux/types.h>
#define ATLX_DRIVER_VERSION "2.1.1"
MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(ATLX_DRIVER_VERSION);
#define ATLX_ERR_PHY 2
#define ATLX_ERR_PHY_SPEED 7
#define ATLX_ERR_PHY_RES 8
#define SPEED_0 0xffff
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
#define HALF_DUPLEX 1
#define FULL_DUPLEX 2
#define MEDIA_TYPE_AUTO_SENSOR 0
/* register definitions */
#define REG_PM_CTRLSTAT 0x44
#define REG_PCIE_CAP_LIST 0x58
#define REG_VPD_CAP 0x6C
#define VPD_CAP_ID_MASK 0xFF
#define VPD_CAP_ID_SHIFT 0
#define VPD_CAP_NEXT_PTR_MASK 0xFF
#define VPD_CAP_NEXT_PTR_SHIFT 8
#define VPD_CAP_VPD_ADDR_MASK 0x7FFF
#define VPD_CAP_VPD_ADDR_SHIFT 16
#define VPD_CAP_VPD_FLAG 0x80000000
#define REG_VPD_DATA 0x70
#define REG_SPI_FLASH_CTRL 0x200
#define SPI_FLASH_CTRL_STS_NON_RDY 0x1
#define SPI_FLASH_CTRL_STS_WEN 0x2
#define SPI_FLASH_CTRL_STS_WPEN 0x80
#define SPI_FLASH_CTRL_DEV_STS_MASK 0xFF
#define SPI_FLASH_CTRL_DEV_STS_SHIFT 0
#define SPI_FLASH_CTRL_INS_MASK 0x7
#define SPI_FLASH_CTRL_INS_SHIFT 8
#define SPI_FLASH_CTRL_START 0x800
#define SPI_FLASH_CTRL_EN_VPD 0x2000
#define SPI_FLASH_CTRL_LDSTART 0x8000
#define SPI_FLASH_CTRL_CS_HI_MASK 0x3
#define SPI_FLASH_CTRL_CS_HI_SHIFT 16
#define SPI_FLASH_CTRL_CS_HOLD_MASK 0x3
#define SPI_FLASH_CTRL_CS_HOLD_SHIFT 18
#define SPI_FLASH_CTRL_CLK_LO_MASK 0x3
#define SPI_FLASH_CTRL_CLK_LO_SHIFT 20
#define SPI_FLASH_CTRL_CLK_HI_MASK 0x3
#define SPI_FLASH_CTRL_CLK_HI_SHIFT 22
#define SPI_FLASH_CTRL_CS_SETUP_MASK 0x3
#define SPI_FLASH_CTRL_CS_SETUP_SHIFT 24
#define SPI_FLASH_CTRL_EROM_PGSZ_MASK 0x3
#define SPI_FLASH_CTRL_EROM_PGSZ_SHIFT 26
#define SPI_FLASH_CTRL_WAIT_READY 0x10000000
#define REG_SPI_ADDR 0x204
#define REG_SPI_DATA 0x208
#define REG_SPI_FLASH_CONFIG 0x20C
#define SPI_FLASH_CONFIG_LD_ADDR_MASK 0xFFFFFF
#define SPI_FLASH_CONFIG_LD_ADDR_SHIFT 0
#define SPI_FLASH_CONFIG_VPD_ADDR_MASK 0x3
#define SPI_FLASH_CONFIG_VPD_ADDR_SHIFT 24
#define SPI_FLASH_CONFIG_LD_EXIST 0x4000000
#define REG_SPI_FLASH_OP_PROGRAM 0x210
#define REG_SPI_FLASH_OP_SC_ERASE 0x211
#define REG_SPI_FLASH_OP_CHIP_ERASE 0x212
#define REG_SPI_FLASH_OP_RDID 0x213
#define REG_SPI_FLASH_OP_WREN 0x214
#define REG_SPI_FLASH_OP_RDSR 0x215
#define REG_SPI_FLASH_OP_WRSR 0x216
#define REG_SPI_FLASH_OP_READ 0x217
#define REG_TWSI_CTRL 0x218
#define TWSI_CTRL_LD_OFFSET_MASK 0xFF
#define TWSI_CTRL_LD_OFFSET_SHIFT 0
#define TWSI_CTRL_LD_SLV_ADDR_MASK 0x7
#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
#define TWSI_CTRL_SW_LDSTART 0x800
#define TWSI_CTRL_HW_LDSTART 0x1000
#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
#define TWSI_CTRL_LD_EXIST 0x400000
#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
#define TWSI_CTRL_READ_FREQ_SEL_SHIFT 23
#define TWSI_CTRL_FREQ_SEL_100K 0
#define TWSI_CTRL_FREQ_SEL_200K 1
#define TWSI_CTRL_FREQ_SEL_300K 2
#define TWSI_CTRL_FREQ_SEL_400K 3
#define TWSI_CTRL_SMB_SLV_ADDR /* FIXME: define or remove */
#define TWSI_CTRL_WRITE_FREQ_SEL_MASK 0x3
#define TWSI_CTRL_WRITE_FREQ_SEL_SHIFT 24
#define REG_PCIE_DEV_MISC_CTRL 0x21C
#define PCIE_DEV_MISC_CTRL_EXT_PIPE 0x2
#define PCIE_DEV_MISC_CTRL_RETRY_BUFDIS 0x1
#define PCIE_DEV_MISC_CTRL_SPIROM_EXIST 0x4
#define PCIE_DEV_MISC_CTRL_SERDES_ENDIAN 0x8
#define PCIE_DEV_MISC_CTRL_SERDES_SEL_DIN 0x10
#define REG_PCIE_PHYMISC 0x1000
#define PCIE_PHYMISC_FORCE_RCV_DET 0x4
#define REG_PCIE_DLL_TX_CTRL1 0x1104
#define PCIE_DLL_TX_CTRL1_SEL_NOR_CLK 0x400
#define PCIE_DLL_TX_CTRL1_DEF 0x568
#define REG_LTSSM_TEST_MODE 0x12FC
#define LTSSM_TEST_MODE_DEF 0x6500
/* Master Control Register */
#define REG_MASTER_CTRL 0x1400
#define MASTER_CTRL_SOFT_RST 0x1
#define MASTER_CTRL_MTIMER_EN 0x2
#define MASTER_CTRL_ITIMER_EN 0x4
#define MASTER_CTRL_MANUAL_INT 0x8
#define MASTER_CTRL_REV_NUM_SHIFT 16
#define MASTER_CTRL_REV_NUM_MASK 0xFF
#define MASTER_CTRL_DEV_ID_SHIFT 24
#define MASTER_CTRL_DEV_ID_MASK 0xFF
/* Timer Initial Value Register */
#define REG_MANUAL_TIMER_INIT 0x1404
/* IRQ Moderator Timer Initial Value Register */
#define REG_IRQ_MODU_TIMER_INIT 0x1408
#define REG_PHY_ENABLE 0x140C
/* IRQ Anti-Lost Timer Initial Value Register */
#define REG_CMBDISDMA_TIMER 0x140E
/* Block IDLE Status Register */
#define REG_IDLE_STATUS 0x1410
/* MDIO Control Register */
#define REG_MDIO_CTRL 0x1414
#define MDIO_DATA_MASK 0xFFFF
#define MDIO_DATA_SHIFT 0
#define MDIO_REG_ADDR_MASK 0x1F
#define MDIO_REG_ADDR_SHIFT 16
#define MDIO_RW 0x200000
#define MDIO_SUP_PREAMBLE 0x400000
#define MDIO_START 0x800000
#define MDIO_CLK_SEL_SHIFT 24
#define MDIO_CLK_25_4 0
#define MDIO_CLK_25_6 2
#define MDIO_CLK_25_8 3
#define MDIO_CLK_25_10 4
#define MDIO_CLK_25_14 5
#define MDIO_CLK_25_20 6
#define MDIO_CLK_25_28 7
#define MDIO_BUSY 0x8000000
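/*
 * Illustrative sketch (an assumption, not the driver's actual PHY read
 * routine): starting an MDIO read usually means packing the register
 * address, clock select and the START/read-write bits into REG_MDIO_CTRL
 * and then polling until MDIO_START/MDIO_BUSY clear, assuming MDIO_RW
 * selects a read operation.
 */
#if 0	/* example only */
static inline u32 example_mdio_read_cmd(u32 reg_addr)
{
	return ((reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT) |
	       MDIO_SUP_PREAMBLE | MDIO_START | MDIO_RW |
	       (MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT);
}
#endif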
/* MII PHY Status Register */
#define REG_PHY_STATUS 0x1418
/* BIST Control and Status Register0 (for the Packet Memory) */
#define REG_BIST0_CTRL 0x141C
#define BIST0_NOW 0x1
#define BIST0_SRAM_FAIL 0x2
#define BIST0_FUSE_FLAG 0x4
#define REG_BIST1_CTRL 0x1420
#define BIST1_NOW 0x1
#define BIST1_SRAM_FAIL 0x2
#define BIST1_FUSE_FLAG 0x4
/* SerDes Lock Detect Control and Status Register */
#define REG_SERDES_LOCK 0x1424
#define SERDES_LOCK_DETECT 1
#define SERDES_LOCK_DETECT_EN 2
/* MAC Control Register */
#define REG_MAC_CTRL 0x1480
#define MAC_CTRL_TX_EN 1
#define MAC_CTRL_RX_EN 2
#define MAC_CTRL_TX_FLOW 4
#define MAC_CTRL_RX_FLOW 8
#define MAC_CTRL_LOOPBACK 0x10
#define MAC_CTRL_DUPLX 0x20
#define MAC_CTRL_ADD_CRC 0x40
#define MAC_CTRL_PAD 0x80
#define MAC_CTRL_LENCHK 0x100
#define MAC_CTRL_HUGE_EN 0x200
#define MAC_CTRL_PRMLEN_SHIFT 10
#define MAC_CTRL_PRMLEN_MASK 0xF
#define MAC_CTRL_RMV_VLAN 0x4000
#define MAC_CTRL_PROMIS_EN 0x8000
#define MAC_CTRL_MC_ALL_EN 0x2000000
#define MAC_CTRL_BC_EN 0x4000000
/* MAC IPG/IFG Control Register */
#define REG_MAC_IPG_IFG 0x1484
#define MAC_IPG_IFG_IPGT_SHIFT 0
#define MAC_IPG_IFG_IPGT_MASK 0x7F
#define MAC_IPG_IFG_MIFG_SHIFT 8
#define MAC_IPG_IFG_MIFG_MASK 0xFF
#define MAC_IPG_IFG_IPGR1_SHIFT 16
#define MAC_IPG_IFG_IPGR1_MASK 0x7F
#define MAC_IPG_IFG_IPGR2_SHIFT 24
#define MAC_IPG_IFG_IPGR2_MASK 0x7F
/* MAC STATION ADDRESS */
#define REG_MAC_STA_ADDR 0x1488
/* Hash table for multicast address */
#define REG_RX_HASH_TABLE 0x1490
/* MAC Half-Duplex Control Register */
#define REG_MAC_HALF_DUPLX_CTRL 0x1498
#define MAC_HALF_DUPLX_CTRL_LCOL_SHIFT 0
#define MAC_HALF_DUPLX_CTRL_LCOL_MASK 0x3FF
#define MAC_HALF_DUPLX_CTRL_RETRY_SHIFT 12
#define MAC_HALF_DUPLX_CTRL_RETRY_MASK 0xF
#define MAC_HALF_DUPLX_CTRL_EXC_DEF_EN 0x10000
#define MAC_HALF_DUPLX_CTRL_NO_BACK_C 0x20000
#define MAC_HALF_DUPLX_CTRL_NO_BACK_P 0x40000
#define MAC_HALF_DUPLX_CTRL_ABEBE 0x80000
#define MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT 20
#define MAC_HALF_DUPLX_CTRL_ABEBT_MASK 0xF
#define MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT 24
#define MAC_HALF_DUPLX_CTRL_JAMIPG_MASK 0xF
/* Maximum Frame Length Control Register */
#define REG_MTU 0x149C
/* Wake-On-Lan control register */
#define REG_WOL_CTRL 0x14A0
#define WOL_PATTERN_EN 0x1
#define WOL_PATTERN_PME_EN 0x2
#define WOL_MAGIC_EN 0x4
#define WOL_MAGIC_PME_EN 0x8
#define WOL_LINK_CHG_EN 0x10
#define WOL_LINK_CHG_PME_EN 0x20
#define WOL_PATTERN_ST 0x100
#define WOL_MAGIC_ST 0x200
#define WOL_LINKCHG_ST 0x400
#define WOL_PT0_EN 0x10000
#define WOL_PT1_EN 0x20000
#define WOL_PT2_EN 0x40000
#define WOL_PT3_EN 0x80000
#define WOL_PT4_EN 0x100000
#define WOL_PT0_MATCH 0x1000000
#define WOL_PT1_MATCH 0x2000000
#define WOL_PT2_MATCH 0x4000000
#define WOL_PT3_MATCH 0x8000000
#define WOL_PT4_MATCH 0x10000000
/* Internal SRAM Partition Register, high 32 bits */
#define REG_SRAM_RFD_ADDR 0x1500
/* Descriptor Control register, high 32 bits */
#define REG_DESC_BASE_ADDR_HI 0x1540
/* Interrupt Status Register */
#define REG_ISR 0x1600
#define ISR_UR_DETECTED 0x1000000
#define ISR_FERR_DETECTED 0x2000000
#define ISR_NFERR_DETECTED 0x4000000
#define ISR_CERR_DETECTED 0x8000000
#define ISR_PHY_LINKDOWN 0x10000000
#define ISR_DIS_INT 0x80000000
/* Interrupt Mask Register */
#define REG_IMR 0x1604
#define REG_RFD_RRD_IDX 0x1800
#define REG_TPD_IDX 0x1804
/* MII definitions */
/* PHY Common Register */
#define MII_ATLX_CR 0x09
#define MII_ATLX_SR 0x0A
#define MII_ATLX_ESR 0x0F
#define MII_ATLX_PSCR 0x10
#define MII_ATLX_PSSR 0x11
/* PHY Control Register */
#define MII_CR_SPEED_SELECT_MSB 0x0040 /* bits 6,13: 10=1000, 01=100,
* 00=10
*/
#define MII_CR_COLL_TEST_ENABLE 0x0080 /* Collision test enable */
#define MII_CR_FULL_DUPLEX 0x0100 /* FDX =1, half duplex =0 */
#define MII_CR_RESTART_AUTO_NEG 0x0200 /* Restart auto negotiation */
#define MII_CR_ISOLATE 0x0400 /* Isolate PHY from MII */
#define MII_CR_POWER_DOWN 0x0800 /* Power down */
#define MII_CR_AUTO_NEG_EN 0x1000 /* Auto Neg Enable */
#define MII_CR_SPEED_SELECT_LSB 0x2000 /* bits 6,13: 10=1000, 01=100,
* 00=10
*/
#define MII_CR_LOOPBACK 0x4000 /* 0 = normal, 1 = loopback */
#define MII_CR_RESET 0x8000 /* 0 = normal, 1 = PHY reset */
#define MII_CR_SPEED_MASK 0x2040
#define MII_CR_SPEED_1000 0x0040
#define MII_CR_SPEED_100 0x2000
#define MII_CR_SPEED_10 0x0000
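/*
 * Worked example (illustrative only): the speed-select field is split
 * across bits 6 and 13, hence MII_CR_SPEED_MASK == 0x2040.  Forcing
 * 100 Mb/s full duplex with autonegotiation off would combine
 * MII_CR_SPEED_100 | MII_CR_FULL_DUPLEX (0x2000 | 0x0100 = 0x2100)
 * after clearing MII_CR_SPEED_MASK and MII_CR_AUTO_NEG_EN.
 */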
/* PHY Status Register */
#define MII_SR_EXTENDED_CAPS 0x0001 /* Ext register capabilities */
#define MII_SR_JABBER_DETECT 0x0002 /* Jabber Detected */
#define MII_SR_LINK_STATUS 0x0004 /* Link Status 1 = link */
#define MII_SR_AUTONEG_CAPS 0x0008 /* Auto Neg Capable */
#define MII_SR_REMOTE_FAULT 0x0010 /* Remote Fault Detect */
#define MII_SR_AUTONEG_COMPLETE 0x0020 /* Auto Neg Complete */
#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
#define MII_SR_EXTENDED_STATUS 0x0100 /* Ext stat info in Reg 0x0F */
#define MII_SR_100T2_HD_CAPS 0x0200 /* 100T2 Half Duplex Capable */
#define MII_SR_100T2_FD_CAPS 0x0400 /* 100T2 Full Duplex Capable */
#define MII_SR_10T_HD_CAPS 0x0800 /* 10T Half Duplex Capable */
#define MII_SR_10T_FD_CAPS 0x1000 /* 10T Full Duplex Capable */
#define MII_SR_100X_HD_CAPS 0x2000 /* 100X Half Duplex Capable */
#define MII_SR_100X_FD_CAPS 0x4000 /* 100X Full Duplex Capable */
#define MII_SR_100T4_CAPS 0x8000 /* 100T4 Capable */
/* Link partner ability register */
#define MII_LPA_SLCT 0x001f /* Same as advertise selector */
#define MII_LPA_10HALF 0x0020 /* Can do 10mbps half-duplex */
#define MII_LPA_10FULL 0x0040 /* Can do 10mbps full-duplex */
#define MII_LPA_100HALF 0x0080 /* Can do 100mbps half-duplex */
#define MII_LPA_100FULL 0x0100 /* Can do 100mbps full-duplex */
#define MII_LPA_100BASE4 0x0200 /* 100BASE-T4 */
#define MII_LPA_PAUSE 0x0400 /* PAUSE */
#define MII_LPA_ASYPAUSE 0x0800 /* Asymmetrical PAUSE */
#define MII_LPA_RFAULT 0x2000 /* Link partner faulted */
#define MII_LPA_LPACK 0x4000 /* Link partner acked us */
#define MII_LPA_NPAGE 0x8000 /* Next page bit */
/* Autoneg Advertisement Register */
#define MII_AR_SELECTOR_FIELD 0x0001 /* IEEE 802.3 CSMA/CD */
#define MII_AR_10T_HD_CAPS 0x0020 /* 10T Half Duplex Capable */
#define MII_AR_10T_FD_CAPS 0x0040 /* 10T Full Duplex Capable */
#define MII_AR_100TX_HD_CAPS 0x0080 /* 100TX Half Duplex Capable */
#define MII_AR_100TX_FD_CAPS 0x0100 /* 100TX Full Duplex Capable */
#define MII_AR_100T4_CAPS 0x0200 /* 100T4 Capable */
#define MII_AR_PAUSE 0x0400 /* Pause operation desired */
#define MII_AR_ASM_DIR 0x0800 /* Asymmetric Pause Dir bit */
#define MII_AR_REMOTE_FAULT 0x2000 /* Remote Fault detected */
#define MII_AR_NEXT_PAGE 0x8000 /* Next Page ability support */
#define MII_AR_SPEED_MASK 0x01E0
#define MII_AR_DEFAULT_CAP_MASK 0x0DE0
/* 1000BASE-T Control Register */
#define MII_ATLX_CR_1000T_HD_CAPS 0x0100 /* Adv 1000T HD cap */
#define MII_ATLX_CR_1000T_FD_CAPS 0x0200 /* Adv 1000T FD cap */
#define MII_ATLX_CR_1000T_REPEATER_DTE 0x0400 /* 1=Repeater/switch device,
* 0=DTE device */
#define MII_ATLX_CR_1000T_MS_VALUE 0x0800 /* 1=Config PHY as Master,
* 0=Configure PHY as Slave */
#define MII_ATLX_CR_1000T_MS_ENABLE 0x1000 /* 1=Man Master/Slave config,
* 0=Auto Master/Slave config
*/
#define MII_ATLX_CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
#define MII_ATLX_CR_1000T_TEST_MODE_1 0x2000 /* Transmit Waveform test */
#define MII_ATLX_CR_1000T_TEST_MODE_2 0x4000 /* Master Xmit Jitter test */
#define MII_ATLX_CR_1000T_TEST_MODE_3 0x6000 /* Slave Xmit Jitter test */
#define MII_ATLX_CR_1000T_TEST_MODE_4 0x8000 /* Xmitter Distortion test */
#define MII_ATLX_CR_1000T_SPEED_MASK 0x0300
#define MII_ATLX_CR_1000T_DEFAULT_CAP_MASK 0x0300
/* 1000BASE-T Status Register */
#define MII_ATLX_SR_1000T_LP_HD_CAPS 0x0400 /* LP is 1000T HD capable */
#define MII_ATLX_SR_1000T_LP_FD_CAPS 0x0800 /* LP is 1000T FD capable */
#define MII_ATLX_SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
#define MII_ATLX_SR_1000T_LOCAL_RX_STATUS 0x2000 /* Local receiver OK */
#define MII_ATLX_SR_1000T_MS_CONFIG_RES 0x4000 /* 1=Local TX is Master
* 0=Slave
*/
#define MII_ATLX_SR_1000T_MS_CONFIG_FAULT 0x8000 /* Master/Slave config
* fault */
#define MII_ATLX_SR_1000T_REMOTE_RX_STATUS_SHIFT 12
#define MII_ATLX_SR_1000T_LOCAL_RX_STATUS_SHIFT 13
/* Extended Status Register */
#define MII_ATLX_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
#define MII_ATLX_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
#define MII_ATLX_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
#define MII_ATLX_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
/* ATLX PHY Specific Control Register */
#define MII_ATLX_PSCR_JABBER_DISABLE 0x0001 /* 1=Jabber Func disabled */
#define MII_ATLX_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enbld */
#define MII_ATLX_PSCR_SQE_TEST 0x0004 /* 1=SQE Test enabled */
#define MII_ATLX_PSCR_MAC_POWERDOWN 0x0008
#define MII_ATLX_PSCR_CLK125_DISABLE 0x0010 /* 1=CLK125 low
* 0=CLK125 toggling
*/
#define MII_ATLX_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5,
* Manual MDI configuration
*/
#define MII_ATLX_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
#define MII_ATLX_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover
* 100BASE-TX/10BASE-T: MDI
* Mode */
#define MII_ATLX_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
* all speeds.
*/
#define MII_ATLX_PSCR_10BT_EXT_DIST_ENABLE 0x0080 /* 1=Enable Extended
* 10BASE-T distance
* (Lower 10BASE-T RX
* Threshold)
* 0=Normal 10BASE-T RX
* Threshold
*/
#define MII_ATLX_PSCR_MII_5BIT_ENABLE 0x0100 /* 1=5-Bit interface in
* 100BASE-TX
* 0=MII interface in
* 100BASE-TX
*/
#define MII_ATLX_PSCR_SCRAMBLER_DISABLE 0x0200 /* 1=Scrambler dsbl */
#define MII_ATLX_PSCR_FORCE_LINK_GOOD 0x0400 /* 1=Force link good */
#define MII_ATLX_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
#define MII_ATLX_PSCR_POLARITY_REVERSAL_SHIFT 1
#define MII_ATLX_PSCR_AUTO_X_MODE_SHIFT 5
#define MII_ATLX_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
/* ATLX PHY Specific Status Register */
#define MII_ATLX_PSSR_SPD_DPLX_RESOLVED 0x0800 /* 1=Speed & Duplex resolved */
#define MII_ATLX_PSSR_DPLX 0x2000 /* 1=Duplex 0=Half Duplex */
#define MII_ATLX_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
#define MII_ATLX_PSSR_10MBS 0x0000 /* 00=10Mbs */
#define MII_ATLX_PSSR_100MBS 0x4000 /* 01=100Mbs */
#define MII_ATLX_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
/* PCI Command Register Bit Definitions */
#define PCI_REG_COMMAND 0x04 /* PCI Command Register */
#define CMD_IO_SPACE 0x0001
#define CMD_MEMORY_SPACE 0x0002
#define CMD_BUS_MASTER 0x0004
/* Wake Up Filter Control */
#define ATLX_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
#define ATLX_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */
#define ATLX_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */
#define ATLX_WUFC_MC 0x00000008 /* Multicast Wakeup Enable */
#define ATLX_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */
#define ADVERTISE_10_HALF 0x0001
#define ADVERTISE_10_FULL 0x0002
#define ADVERTISE_100_HALF 0x0004
#define ADVERTISE_100_FULL 0x0008
#define ADVERTISE_1000_HALF 0x0010
#define ADVERTISE_1000_FULL 0x0020
#define AUTONEG_ADVERTISE_10_100_ALL 0x000F /* All 10/100 speeds */
#define AUTONEG_ADVERTISE_10_ALL 0x0003 /* 10Mbps Full & Half speeds */
#define PHY_AUTO_NEG_TIME 45 /* 4.5 Seconds */
#define PHY_FORCE_TIME 20 /* 2.0 Seconds */
/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA */
#define EEPROM_SUM 0xBABA
#define NODE_ADDRESS_SIZE 6
struct atlx_spi_flash_dev {
const char *manu_name; /* manufacturer id */
/* op-code */
u8 cmd_wrsr;
u8 cmd_read;
u8 cmd_program;
u8 cmd_wren;
u8 cmd_wrdi;
u8 cmd_rdsr;
u8 cmd_rdid;
u8 cmd_sector_erase;
u8 cmd_chip_erase;
};
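/*
 * Illustrative sketch (hypothetical entry, using generic SPI flash
 * op-codes): a table indexed by the flash_vendor module parameter
 * (0 = Atmel, 1 = SST, 2 = ST) would fill one such descriptor per
 * manufacturer, for example:
 */
#if 0	/* example only */
static const struct atlx_spi_flash_dev example_flash_table[] = {
	/*  name     wrsr  read  prog  wren  wrdi  rdsr  rdid  sec_er chip_er */
	{ "atmel",   0x00, 0x03, 0x02, 0x06, 0x04, 0x05, 0x15, 0x52,  0x62 },
};
#endif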
#endif /* ATLX_H */

View file

@ -378,8 +378,8 @@ static void __init get_node_ID(struct net_device *dev)
sa_offset = 15;
for (i = 0; i < 3; i++)
((u16 *)dev->dev_addr)[i] =
be16_to_cpu(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
((__be16 *)dev->dev_addr)[i] =
cpu_to_be16(eeprom_op(ioaddr, EE_READ(sa_offset + i)));
write_reg(ioaddr, CMR2, CMR2_NULL);
}

View file

@ -701,7 +701,7 @@ static struct net_device * au1000_probe(int port_num)
aup->mii_bus.write = mdiobus_write;
aup->mii_bus.reset = mdiobus_reset;
aup->mii_bus.name = "au1000_eth_mii";
aup->mii_bus.id = aup->mac_id;
snprintf(aup->mii_bus.id, MII_BUS_ID_SIZE, "%x", aup->mac_id);
aup->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
for(i = 0; i < PHY_MAX_ADDR; ++i)
aup->mii_bus.irq[i] = PHY_POLL;
@ -709,11 +709,11 @@ static struct net_device * au1000_probe(int port_num)
/* if known, set corresponding PHY IRQs */
#if defined(AU1XXX_PHY_STATIC_CONFIG)
# if defined(AU1XXX_PHY0_IRQ)
if (AU1XXX_PHY0_BUSID == aup->mii_bus.id)
if (AU1XXX_PHY0_BUSID == aup->mac_id)
aup->mii_bus.irq[AU1XXX_PHY0_ADDR] = AU1XXX_PHY0_IRQ;
# endif
# if defined(AU1XXX_PHY1_IRQ)
if (AU1XXX_PHY1_BUSID == aup->mii_bus.id)
if (AU1XXX_PHY1_BUSID == aup->mac_id)
aup->mii_bus.irq[AU1XXX_PHY1_ADDR] = AU1XXX_PHY1_IRQ;
# endif
#endif

View file

@ -969,7 +969,7 @@ static int __init bf537mac_probe(struct net_device *dev)
lp->mii_bus.write = mdiobus_write;
lp->mii_bus.reset = mdiobus_reset;
lp->mii_bus.name = "bfin_mac_mdio";
lp->mii_bus.id = 0;
snprintf(lp->mii_bus.id, MII_BUS_ID_SIZE, "0");
lp->mii_bus.irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
for (i = 0; i < PHY_MAX_ADDR; ++i)
lp->mii_bus.irq[i] = PHY_POLL;

View file

@ -2429,7 +2429,7 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
struct slave *slave = NULL;
int ret = NET_RX_DROP;
if (dev->nd_net != &init_net)
if (dev_net(dev) != &init_net)
goto out;
if (!(dev->flags & IFF_MASTER))

View file

@ -345,7 +345,7 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
struct arp_pkt *arp = (struct arp_pkt *)skb->data;
int res = NET_RX_DROP;
if (bond_dev->nd_net != &init_net)
if (dev_net(bond_dev) != &init_net)
goto out;
if (!(bond_dev->flags & IFF_MASTER))

View file

@ -2629,7 +2629,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
unsigned char *arp_ptr;
__be32 sip, tip;
if (dev->nd_net != &init_net)
if (dev_net(dev) != &init_net)
goto out;
if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
@ -2646,10 +2646,7 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
if (!slave || !slave_do_arp_validate(bond, slave))
goto out_unlock;
/* ARP header, plus 2 device addresses, plus 2 IP addresses. */
if (!pskb_may_pull(skb, (sizeof(struct arphdr) +
(2 * dev->addr_len) +
(2 * sizeof(u32)))))
if (!pskb_may_pull(skb, arp_hdr_len(dev)))
goto out_unlock;
arp = arp_hdr(skb);
@ -3068,8 +3065,6 @@ out:
#ifdef CONFIG_PROC_FS
#define SEQ_START_TOKEN ((void *)1)
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
{
struct bonding *bond = seq->private;
@ -3473,7 +3468,7 @@ static int bond_netdev_event(struct notifier_block *this, unsigned long event, v
{
struct net_device *event_dev = (struct net_device *)ptr;
if (event_dev->nd_net != &init_net)
if (dev_net(event_dev) != &init_net)
return NOTIFY_DONE;
dprintk("event_dev: %s, event: %lx\n",
@ -3511,6 +3506,9 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
struct bonding *bond, *bond_next;
struct vlan_entry *vlan, *vlan_next;
if (dev_net(ifa->ifa_dev->dev) != &init_net)
return NOTIFY_DONE;
list_for_each_entry_safe(bond, bond_next, &bond_dev_list, bond_list) {
if (bond->dev == event_dev) {
switch (event) {

View file

@ -532,8 +532,7 @@ static void cas_spare_free(struct cas *cp)
/* free spare buffers */
INIT_LIST_HEAD(&list);
spin_lock(&cp->rx_spare_lock);
list_splice(&cp->rx_spare_list, &list);
INIT_LIST_HEAD(&cp->rx_spare_list);
list_splice_init(&cp->rx_spare_list, &list);
spin_unlock(&cp->rx_spare_lock);
list_for_each_safe(elem, tmp, &list) {
cas_page_free(cp, list_entry(elem, cas_page_t, list));
@ -546,13 +545,11 @@ static void cas_spare_free(struct cas *cp)
* lock than used everywhere else to manipulate this list.
*/
spin_lock(&cp->rx_inuse_lock);
list_splice(&cp->rx_inuse_list, &list);
INIT_LIST_HEAD(&cp->rx_inuse_list);
list_splice_init(&cp->rx_inuse_list, &list);
spin_unlock(&cp->rx_inuse_lock);
#else
spin_lock(&cp->rx_spare_lock);
list_splice(&cp->rx_inuse_list, &list);
INIT_LIST_HEAD(&cp->rx_inuse_list);
list_splice_init(&cp->rx_inuse_list, &list);
spin_unlock(&cp->rx_spare_lock);
#endif
list_for_each_safe(elem, tmp, &list) {
@ -573,8 +570,7 @@ static void cas_spare_recover(struct cas *cp, const gfp_t flags)
/* make a local copy of the list */
INIT_LIST_HEAD(&list);
spin_lock(&cp->rx_inuse_lock);
list_splice(&cp->rx_inuse_list, &list);
INIT_LIST_HEAD(&cp->rx_inuse_list);
list_splice_init(&cp->rx_inuse_list, &list);
spin_unlock(&cp->rx_inuse_lock);
list_for_each_safe(elem, tmp, &list) {

View file

@ -987,7 +987,7 @@ static int external_switch;
static int __devinit cpmac_probe(struct platform_device *pdev)
{
int rc, phy_id, i;
int mdio_bus_id = cpmac_mii.id;
char *mdio_bus_id = "0";
struct resource *mem;
struct cpmac_priv *priv;
struct net_device *dev;
@ -1008,8 +1008,6 @@ static int __devinit cpmac_probe(struct platform_device *pdev)
if (external_switch || dumb_switch) {
struct fixed_phy_status status = {};
mdio_bus_id = 0;
/*
* FIXME: this should be in the platform code!
* Since there is not platform code at all (that is,
@ -1143,6 +1141,7 @@ int __devinit cpmac_init(void)
}
cpmac_mii.phy_mask = ~(mask | 0x80000000);
snprintf(cpmac_mii.id, MII_BUS_ID_SIZE, "0");
res = mdiobus_register(&cpmac_mii);
if (res)

View file

@ -1014,8 +1014,8 @@ static int offload_open(struct net_device *dev)
adapter->port[0]->mtu : 0xffff);
init_smt(adapter);
/* Never mind if the next step fails */
sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group);
if (sysfs_create_group(&tdev->lldev->dev.kobj, &offload_attr_group))
dev_dbg(&dev->dev, "cannot create sysfs group\n");
/* Call back all registered clients */
cxgb3_add_clients(tdev);

View file

@ -833,10 +833,26 @@ static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
return 0;
}
/*
* That skb would better have come from process_responses() where we abuse
* ->priority and ->csum to carry our data. NB: if we get to per-arch
 * ->csum, things might get really interesting here.
*/
static inline u32 get_hwtid(struct sk_buff *skb)
{
return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
}
static inline u32 get_opcode(struct sk_buff *skb)
{
return G_OPCODE(ntohl((__force __be32)skb->csum));
}
static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
unsigned int hwtid = ntohl(skb->priority) >> 8 & 0xfffff;
unsigned int opcode = G_OPCODE(ntohl(skb->csum));
unsigned int hwtid = get_hwtid(skb);
unsigned int opcode = get_opcode(skb);
struct t3c_tid_entry *t3c_tid;
t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
@ -914,7 +930,7 @@ int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
while (n--) {
struct sk_buff *skb = *skbs++;
unsigned int opcode = G_OPCODE(ntohl(skb->csum));
unsigned int opcode = get_opcode(skb);
int ret = cpl_handlers[opcode] (dev, skb);
#if VALIDATE_TID

View file

@ -407,7 +407,7 @@ found:
} else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
setup_l2e_send_pending(dev, NULL, e);
} else {
e->state = neigh_is_connected(neigh) ?
e->state = neigh->nud_state & NUD_CONNECTED ?
L2T_STATE_VALID : L2T_STATE_STALE;
if (memcmp(e->dmac, neigh->ha, 6))
setup_l2e_send_pending(dev, NULL, e);

View file

@ -971,7 +971,8 @@ static int __devinit dfx_driver_init(struct net_device *dev,
int alloc_size; /* total buffer size needed */
char *top_v, *curr_v; /* virtual addrs into memory block */
dma_addr_t top_p, curr_p; /* physical addrs into memory block */
u32 data, le32; /* host data register value */
u32 data; /* host data register value */
__le32 le32;
char *board_name = NULL;
DBG_printk("In dfx_driver_init...\n");

View file

@ -161,13 +161,13 @@ struct e1000_buffer {
struct sk_buff *skb;
dma_addr_t dma;
unsigned long time_stamp;
uint16_t length;
uint16_t next_to_watch;
u16 length;
u16 next_to_watch;
};
struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
struct e1000_ps_page_dma { u64 ps_page_dma[PS_PAGE_BUFFERS]; };
struct e1000_tx_ring {
/* pointer to the descriptor ring memory */
@ -186,9 +186,9 @@ struct e1000_tx_ring {
struct e1000_buffer *buffer_info;
spinlock_t tx_lock;
uint16_t tdh;
uint16_t tdt;
boolean_t last_tx_tso;
u16 tdh;
u16 tdt;
bool last_tx_tso;
};
struct e1000_rx_ring {
@ -213,8 +213,8 @@ struct e1000_rx_ring {
/* cpu for rx queue */
int cpu;
uint16_t rdh;
uint16_t rdt;
u16 rdh;
u16 rdt;
};
#define E1000_DESC_UNUSED(R) \
@ -237,31 +237,30 @@ struct e1000_adapter {
struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
struct vlan_group *vlgrp;
uint16_t mng_vlan_id;
uint32_t bd_number;
uint32_t rx_buffer_len;
uint32_t wol;
uint32_t smartspeed;
uint32_t en_mng_pt;
uint16_t link_speed;
uint16_t link_duplex;
u16 mng_vlan_id;
u32 bd_number;
u32 rx_buffer_len;
u32 wol;
u32 smartspeed;
u32 en_mng_pt;
u16 link_speed;
u16 link_duplex;
spinlock_t stats_lock;
#ifdef CONFIG_E1000_NAPI
spinlock_t tx_queue_lock;
#endif
atomic_t irq_sem;
unsigned int total_tx_bytes;
unsigned int total_tx_packets;
unsigned int total_rx_bytes;
unsigned int total_rx_packets;
/* Interrupt Throttle Rate */
uint32_t itr;
uint32_t itr_setting;
uint16_t tx_itr;
uint16_t rx_itr;
u32 itr;
u32 itr_setting;
u16 tx_itr;
u16 rx_itr;
struct work_struct reset_task;
uint8_t fc_autoneg;
u8 fc_autoneg;
struct timer_list blink_timer;
unsigned long led_status;
@ -270,30 +269,30 @@ struct e1000_adapter {
struct e1000_tx_ring *tx_ring; /* One per active queue */
unsigned int restart_queue;
unsigned long tx_queue_len;
uint32_t txd_cmd;
uint32_t tx_int_delay;
uint32_t tx_abs_int_delay;
uint32_t gotcl;
uint64_t gotcl_old;
uint64_t tpt_old;
uint64_t colc_old;
uint32_t tx_timeout_count;
uint32_t tx_fifo_head;
uint32_t tx_head_addr;
uint32_t tx_fifo_size;
uint8_t tx_timeout_factor;
u32 txd_cmd;
u32 tx_int_delay;
u32 tx_abs_int_delay;
u32 gotcl;
u64 gotcl_old;
u64 tpt_old;
u64 colc_old;
u32 tx_timeout_count;
u32 tx_fifo_head;
u32 tx_head_addr;
u32 tx_fifo_size;
u8 tx_timeout_factor;
atomic_t tx_fifo_stall;
boolean_t pcix_82544;
boolean_t detect_tx_hung;
bool pcix_82544;
bool detect_tx_hung;
/* RX */
#ifdef CONFIG_E1000_NAPI
boolean_t (*clean_rx) (struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
bool (*clean_rx) (struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
#else
boolean_t (*clean_rx) (struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring);
bool (*clean_rx) (struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring);
#endif
void (*alloc_rx_buf) (struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
@ -306,17 +305,17 @@ struct e1000_adapter {
int num_tx_queues;
int num_rx_queues;
uint64_t hw_csum_err;
uint64_t hw_csum_good;
uint64_t rx_hdr_split;
uint32_t alloc_rx_buff_failed;
uint32_t rx_int_delay;
uint32_t rx_abs_int_delay;
boolean_t rx_csum;
u64 hw_csum_err;
u64 hw_csum_good;
u64 rx_hdr_split;
u32 alloc_rx_buff_failed;
u32 rx_int_delay;
u32 rx_abs_int_delay;
bool rx_csum;
unsigned int rx_ps_pages;
uint32_t gorcl;
uint64_t gorcl_old;
uint16_t rx_ps_bsize0;
u32 gorcl;
u64 gorcl_old;
u16 rx_ps_bsize0;
/* OS defined structs */
@ -330,19 +329,19 @@ struct e1000_adapter {
struct e1000_phy_info phy_info;
struct e1000_phy_stats phy_stats;
uint32_t test_icr;
u32 test_icr;
struct e1000_tx_ring test_tx_ring;
struct e1000_rx_ring test_rx_ring;
int msg_enable;
boolean_t have_msi;
bool have_msi;
/* to not mess up cache alignment, always add to the bottom */
boolean_t tso_force;
boolean_t smart_power_down; /* phy smart power down */
boolean_t quad_port_a;
bool tso_force;
bool smart_power_down; /* phy smart power down */
bool quad_port_a;
unsigned long flags;
uint32_t eeprom_wol;
u32 eeprom_wol;
};
enum e1000_state_t {

View file

@ -36,7 +36,7 @@ extern int e1000_up(struct e1000_adapter *adapter);
extern void e1000_down(struct e1000_adapter *adapter);
extern void e1000_reinit_locked(struct e1000_adapter *adapter);
extern void e1000_reset(struct e1000_adapter *adapter);
extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
@ -289,7 +289,7 @@ e1000_set_pauseparam(struct net_device *netdev,
return retval;
}
static uint32_t
static u32
e1000_get_rx_csum(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@ -297,7 +297,7 @@ e1000_get_rx_csum(struct net_device *netdev)
}
static int
e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
e1000_set_rx_csum(struct net_device *netdev, u32 data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
adapter->rx_csum = data;
@ -309,14 +309,14 @@ e1000_set_rx_csum(struct net_device *netdev, uint32_t data)
return 0;
}
static uint32_t
static u32
e1000_get_tx_csum(struct net_device *netdev)
{
return (netdev->features & NETIF_F_HW_CSUM) != 0;
}
static int
e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
e1000_set_tx_csum(struct net_device *netdev, u32 data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@ -335,7 +335,7 @@ e1000_set_tx_csum(struct net_device *netdev, uint32_t data)
}
static int
e1000_set_tso(struct net_device *netdev, uint32_t data)
e1000_set_tso(struct net_device *netdev, u32 data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
if ((adapter->hw.mac_type < e1000_82544) ||
@ -353,11 +353,11 @@ e1000_set_tso(struct net_device *netdev, uint32_t data)
netdev->features &= ~NETIF_F_TSO6;
DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
adapter->tso_force = TRUE;
adapter->tso_force = true;
return 0;
}
static uint32_t
static u32
e1000_get_msglevel(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@ -365,7 +365,7 @@ e1000_get_msglevel(struct net_device *netdev)
}
static void
e1000_set_msglevel(struct net_device *netdev, uint32_t data)
e1000_set_msglevel(struct net_device *netdev, u32 data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
adapter->msg_enable = data;
@ -375,7 +375,7 @@ static int
e1000_get_regs_len(struct net_device *netdev)
{
#define E1000_REGS_LEN 32
return E1000_REGS_LEN * sizeof(uint32_t);
return E1000_REGS_LEN * sizeof(u32);
}
static void
@ -384,10 +384,10 @@ e1000_get_regs(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
uint32_t *regs_buff = p;
uint16_t phy_data;
u32 *regs_buff = p;
u16 phy_data;
memset(p, 0, E1000_REGS_LEN * sizeof(uint32_t));
memset(p, 0, E1000_REGS_LEN * sizeof(u32));
regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
@ -412,44 +412,44 @@ e1000_get_regs(struct net_device *netdev,
IGP01E1000_PHY_AGC_A);
e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
IGP01E1000_PHY_PAGE_SELECT, &phy_data);
regs_buff[13] = (uint32_t)phy_data; /* cable length */
regs_buff[13] = (u32)phy_data; /* cable length */
e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
IGP01E1000_PHY_AGC_B);
e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
IGP01E1000_PHY_PAGE_SELECT, &phy_data);
regs_buff[14] = (uint32_t)phy_data; /* cable length */
regs_buff[14] = (u32)phy_data; /* cable length */
e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
IGP01E1000_PHY_AGC_C);
e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
IGP01E1000_PHY_PAGE_SELECT, &phy_data);
regs_buff[15] = (uint32_t)phy_data; /* cable length */
regs_buff[15] = (u32)phy_data; /* cable length */
e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
IGP01E1000_PHY_AGC_D);
e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
IGP01E1000_PHY_PAGE_SELECT, &phy_data);
regs_buff[16] = (uint32_t)phy_data; /* cable length */
regs_buff[16] = (u32)phy_data; /* cable length */
regs_buff[17] = 0; /* extended 10bt distance (not needed) */
e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
IGP01E1000_PHY_PAGE_SELECT, &phy_data);
regs_buff[18] = (uint32_t)phy_data; /* cable polarity */
regs_buff[18] = (u32)phy_data; /* cable polarity */
e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
IGP01E1000_PHY_PCS_INIT_REG);
e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
IGP01E1000_PHY_PAGE_SELECT, &phy_data);
regs_buff[19] = (uint32_t)phy_data; /* cable polarity */
regs_buff[19] = (u32)phy_data; /* cable polarity */
regs_buff[20] = 0; /* polarity correction enabled (always) */
regs_buff[22] = 0; /* phy receive errors (unavailable) */
regs_buff[23] = regs_buff[18]; /* mdix mode */
e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
} else {
e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
regs_buff[13] = (uint32_t)phy_data; /* cable length */
regs_buff[13] = (u32)phy_data; /* cable length */
regs_buff[14] = 0; /* Dummy (to align w/ IGP phy reg dump) */
regs_buff[15] = 0; /* Dummy (to align w/ IGP phy reg dump) */
regs_buff[16] = 0; /* Dummy (to align w/ IGP phy reg dump) */
e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
regs_buff[17] = (uint32_t)phy_data; /* extended 10bt distance */
regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
regs_buff[18] = regs_buff[13]; /* cable polarity */
regs_buff[19] = 0; /* Dummy (to align w/ IGP phy reg dump) */
regs_buff[20] = regs_buff[17]; /* polarity correction */
@ -459,7 +459,7 @@ e1000_get_regs(struct net_device *netdev,
}
regs_buff[21] = adapter->phy_stats.idle_errors; /* phy idle errors */
e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
regs_buff[24] = (uint32_t)phy_data; /* phy local receiver status */
regs_buff[24] = (u32)phy_data; /* phy local receiver status */
regs_buff[25] = regs_buff[24]; /* phy remote receiver status */
if (hw->mac_type >= e1000_82540 &&
hw->mac_type < e1000_82571 &&
@ -477,14 +477,14 @@ e1000_get_eeprom_len(struct net_device *netdev)
static int
e1000_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, uint8_t *bytes)
struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
uint16_t *eeprom_buff;
u16 *eeprom_buff;
int first_word, last_word;
int ret_val = 0;
uint16_t i;
u16 i;
if (eeprom->len == 0)
return -EINVAL;
@ -494,7 +494,7 @@ e1000_get_eeprom(struct net_device *netdev,
first_word = eeprom->offset >> 1;
last_word = (eeprom->offset + eeprom->len - 1) >> 1;
eeprom_buff = kmalloc(sizeof(uint16_t) *
eeprom_buff = kmalloc(sizeof(u16) *
(last_word - first_word + 1), GFP_KERNEL);
if (!eeprom_buff)
return -ENOMEM;
@ -514,7 +514,7 @@ e1000_get_eeprom(struct net_device *netdev,
for (i = 0; i < last_word - first_word + 1; i++)
le16_to_cpus(&eeprom_buff[i]);
memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1),
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
eeprom->len);
kfree(eeprom_buff);
@ -523,14 +523,14 @@ e1000_get_eeprom(struct net_device *netdev,
static int
e1000_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, uint8_t *bytes)
struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
uint16_t *eeprom_buff;
u16 *eeprom_buff;
void *ptr;
int max_len, first_word, last_word, ret_val = 0;
uint16_t i;
u16 i;
if (eeprom->len == 0)
return -EOPNOTSUPP;
@ -590,7 +590,7 @@ e1000_get_drvinfo(struct net_device *netdev,
{
struct e1000_adapter *adapter = netdev_priv(netdev);
char firmware_version[32];
uint16_t eeprom_data;
u16 eeprom_data;
strncpy(drvinfo->driver, e1000_driver_name, 32);
strncpy(drvinfo->version, e1000_driver_version, 32);
@ -674,13 +674,13 @@ e1000_set_ringparam(struct net_device *netdev,
adapter->tx_ring = txdr;
adapter->rx_ring = rxdr;
rxdr->count = max(ring->rx_pending,(uint32_t)E1000_MIN_RXD);
rxdr->count = min(rxdr->count,(uint32_t)(mac_type < e1000_82544 ?
rxdr->count = max(ring->rx_pending,(u32)E1000_MIN_RXD);
rxdr->count = min(rxdr->count,(u32)(mac_type < e1000_82544 ?
E1000_MAX_RXD : E1000_MAX_82544_RXD));
rxdr->count = ALIGN(rxdr->count, REQ_RX_DESCRIPTOR_MULTIPLE);
txdr->count = max(ring->tx_pending,(uint32_t)E1000_MIN_TXD);
txdr->count = min(txdr->count,(uint32_t)(mac_type < e1000_82544 ?
txdr->count = max(ring->tx_pending,(u32)E1000_MIN_TXD);
txdr->count = min(txdr->count,(u32)(mac_type < e1000_82544 ?
E1000_MAX_TXD : E1000_MAX_82544_TXD));
txdr->count = ALIGN(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);
@ -728,13 +728,13 @@ err_setup:
return err;
}
static bool reg_pattern_test(struct e1000_adapter *adapter, uint64_t *data,
int reg, uint32_t mask, uint32_t write)
static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
static const uint32_t test[] =
static const u32 test[] =
{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
uint8_t __iomem *address = adapter->hw.hw_addr + reg;
uint32_t read;
u8 __iomem *address = adapter->hw.hw_addr + reg;
u32 read;
int i;
for (i = 0; i < ARRAY_SIZE(test); i++) {
@ -751,11 +751,11 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, uint64_t *data,
return false;
}
static bool reg_set_and_check(struct e1000_adapter *adapter, uint64_t *data,
int reg, uint32_t mask, uint32_t write)
static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
uint8_t __iomem *address = adapter->hw.hw_addr + reg;
uint32_t read;
u8 __iomem *address = adapter->hw.hw_addr + reg;
u32 read;
writel(write & mask, address);
read = readl(address);
@ -788,10 +788,10 @@ static bool reg_set_and_check(struct e1000_adapter *adapter, uint64_t *data,
} while (0)
static int
e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
{
uint32_t value, before, after;
uint32_t i, toggle;
u32 value, before, after;
u32 i, toggle;
/* The status register is Read Only, so a write should fail.
* Some bits that get toggled are ignored.
@ -884,11 +884,11 @@ e1000_reg_test(struct e1000_adapter *adapter, uint64_t *data)
}
static int
e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
{
uint16_t temp;
uint16_t checksum = 0;
uint16_t i;
u16 temp;
u16 checksum = 0;
u16 i;
*data = 0;
/* Read and add up the contents of the EEPROM */
@ -901,7 +901,7 @@ e1000_eeprom_test(struct e1000_adapter *adapter, uint64_t *data)
}
/* If Checksum is not Correct return error else test passed */
if ((checksum != (uint16_t) EEPROM_SUM) && !(*data))
if ((checksum != (u16) EEPROM_SUM) && !(*data))
*data = 2;
return *data;
@ -919,11 +919,12 @@ e1000_test_intr(int irq, void *data)
}
static int
e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
{
struct net_device *netdev = adapter->netdev;
uint32_t mask, i=0, shared_int = TRUE;
uint32_t irq = adapter->pdev->irq;
u32 mask, i = 0;
bool shared_int = true;
u32 irq = adapter->pdev->irq;
*data = 0;
@ -931,7 +932,7 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
/* Hook up test interrupt handler just for this test */
if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
netdev))
shared_int = FALSE;
shared_int = false;
else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
netdev->name, netdev)) {
*data = 1;
@ -1069,7 +1070,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
struct e1000_tx_ring *txdr = &adapter->test_tx_ring;
struct e1000_rx_ring *rxdr = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
uint32_t rctl;
u32 rctl;
int i, ret_val;
/* Setup Tx descriptor ring and Tx buffers */
@ -1095,8 +1096,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->next_to_use = txdr->next_to_clean = 0;
E1000_WRITE_REG(&adapter->hw, TDBAL,
((uint64_t) txdr->dma & 0x00000000FFFFFFFF));
E1000_WRITE_REG(&adapter->hw, TDBAH, ((uint64_t) txdr->dma >> 32));
((u64) txdr->dma & 0x00000000FFFFFFFF));
E1000_WRITE_REG(&adapter->hw, TDBAH, ((u64) txdr->dma >> 32));
E1000_WRITE_REG(&adapter->hw, TDLEN,
txdr->count * sizeof(struct e1000_tx_desc));
E1000_WRITE_REG(&adapter->hw, TDH, 0);
@ -1152,8 +1153,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
rctl = E1000_READ_REG(&adapter->hw, RCTL);
E1000_WRITE_REG(&adapter->hw, RCTL, rctl & ~E1000_RCTL_EN);
E1000_WRITE_REG(&adapter->hw, RDBAL,
((uint64_t) rxdr->dma & 0xFFFFFFFF));
E1000_WRITE_REG(&adapter->hw, RDBAH, ((uint64_t) rxdr->dma >> 32));
((u64) rxdr->dma & 0xFFFFFFFF));
E1000_WRITE_REG(&adapter->hw, RDBAH, ((u64) rxdr->dma >> 32));
E1000_WRITE_REG(&adapter->hw, RDLEN, rxdr->size);
E1000_WRITE_REG(&adapter->hw, RDH, 0);
E1000_WRITE_REG(&adapter->hw, RDT, 0);
@ -1201,7 +1202,7 @@ e1000_phy_disable_receiver(struct e1000_adapter *adapter)
static void
e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
{
uint16_t phy_reg;
u16 phy_reg;
/* Because we reset the PHY above, we need to re-force TX_CLK in the
* Extended PHY Specific Control Register to 25MHz clock. This
@ -1225,8 +1226,8 @@ e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
static int
e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
{
uint32_t ctrl_reg;
uint16_t phy_reg;
u32 ctrl_reg;
u16 phy_reg;
/* Setup the Device Control Register for PHY loopback test. */
@ -1292,10 +1293,10 @@ e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
static int
e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
{
uint32_t ctrl_reg = 0;
uint32_t stat_reg = 0;
u32 ctrl_reg = 0;
u32 stat_reg = 0;
adapter->hw.autoneg = FALSE;
adapter->hw.autoneg = false;
if (adapter->hw.phy_type == e1000_phy_m88) {
/* Auto-MDI/MDIX Off */
@ -1362,8 +1363,8 @@ e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
static int
e1000_set_phy_loopback(struct e1000_adapter *adapter)
{
uint16_t phy_reg = 0;
uint16_t count = 0;
u16 phy_reg = 0;
u16 count = 0;
switch (adapter->hw.mac_type) {
case e1000_82543:
@ -1415,7 +1416,7 @@ static int
e1000_setup_loopback_test(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
uint32_t rctl;
u32 rctl;
if (hw->media_type == e1000_media_type_fiber ||
hw->media_type == e1000_media_type_internal_serdes) {
@ -1450,8 +1451,8 @@ static void
e1000_loopback_cleanup(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
uint32_t rctl;
uint16_t phy_reg;
u32 rctl;
u16 phy_reg;
rctl = E1000_READ_REG(hw, RCTL);
rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
@ -1473,7 +1474,7 @@ e1000_loopback_cleanup(struct e1000_adapter *adapter)
case e1000_82545_rev_3:
case e1000_82546_rev_3:
default:
hw->autoneg = TRUE;
hw->autoneg = true;
if (hw->phy_type == e1000_phy_gg82563)
e1000_write_phy_reg(hw,
GG82563_PHY_KMRN_MODE_CTRL,
@ -1577,7 +1578,7 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
}
static int
e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
{
/* PHY loopback cannot be performed if SoL/IDER
* sessions are active */
@ -1602,18 +1603,18 @@ out:
}
static int
e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
e1000_link_test(struct e1000_adapter *adapter, u64 *data)
{
*data = 0;
if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
int i = 0;
adapter->hw.serdes_link_down = TRUE;
adapter->hw.serdes_link_down = true;
/* On some blade server designs, link establishment
* could take as long as 2-3 minutes */
do {
e1000_check_for_link(&adapter->hw);
if (adapter->hw.serdes_link_down == FALSE)
if (!adapter->hw.serdes_link_down)
return *data;
msleep(20);
} while (i++ < 3750);
@ -1646,19 +1647,19 @@ e1000_get_sset_count(struct net_device *netdev, int sset)
static void
e1000_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, uint64_t *data)
struct ethtool_test *eth_test, u64 *data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
boolean_t if_running = netif_running(netdev);
bool if_running = netif_running(netdev);
set_bit(__E1000_TESTING, &adapter->flags);
if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
/* Offline tests */
/* save speed, duplex, autoneg settings */
uint16_t autoneg_advertised = adapter->hw.autoneg_advertised;
uint8_t forced_speed_duplex = adapter->hw.forced_speed_duplex;
uint8_t autoneg = adapter->hw.autoneg;
u16 autoneg_advertised = adapter->hw.autoneg_advertised;
u8 forced_speed_duplex = adapter->hw.forced_speed_duplex;
u8 autoneg = adapter->hw.autoneg;
DPRINTK(HW, INFO, "offline testing starting\n");
@ -1876,7 +1877,7 @@ e1000_led_blink_callback(unsigned long data)
}
static int
e1000_phys_id(struct net_device *netdev, uint32_t data)
e1000_phys_id(struct net_device *netdev, u32 data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
@ -1926,7 +1927,7 @@ e1000_nway_reset(struct net_device *netdev)
static void
e1000_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, uint64_t *data)
struct ethtool_stats *stats, u64 *data)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
int i;
@ -1935,15 +1936,15 @@ e1000_get_ethtool_stats(struct net_device *netdev,
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
/* BUG_ON(i != E1000_STATS_LEN); */
}
static void
e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
e1000_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
uint8_t *p = data;
u8 *p = data;
int i;
switch (stringset) {

File diff not shown because it is too large

View file

@ -100,8 +100,8 @@ typedef enum {
} e1000_fc_type;
struct e1000_shadow_ram {
uint16_t eeprom_word;
boolean_t modified;
u16 eeprom_word;
bool modified;
};
/* PCI bus types */
@ -263,19 +263,19 @@ struct e1000_phy_info {
};
struct e1000_phy_stats {
uint32_t idle_errors;
uint32_t receive_errors;
u32 idle_errors;
u32 receive_errors;
};
struct e1000_eeprom_info {
e1000_eeprom_type type;
uint16_t word_size;
uint16_t opcode_bits;
uint16_t address_bits;
uint16_t delay_usec;
uint16_t page_size;
boolean_t use_eerd;
boolean_t use_eewr;
u16 word_size;
u16 opcode_bits;
u16 address_bits;
u16 delay_usec;
u16 page_size;
bool use_eerd;
bool use_eewr;
};
/* Flex ASF Information */
@ -308,34 +308,34 @@ typedef enum {
/* Function prototypes */
/* Initialization */
int32_t e1000_reset_hw(struct e1000_hw *hw);
int32_t e1000_init_hw(struct e1000_hw *hw);
int32_t e1000_set_mac_type(struct e1000_hw *hw);
s32 e1000_reset_hw(struct e1000_hw *hw);
s32 e1000_init_hw(struct e1000_hw *hw);
s32 e1000_set_mac_type(struct e1000_hw *hw);
void e1000_set_media_type(struct e1000_hw *hw);
/* Link Configuration */
int32_t e1000_setup_link(struct e1000_hw *hw);
int32_t e1000_phy_setup_autoneg(struct e1000_hw *hw);
s32 e1000_setup_link(struct e1000_hw *hw);
s32 e1000_phy_setup_autoneg(struct e1000_hw *hw);
void e1000_config_collision_dist(struct e1000_hw *hw);
int32_t e1000_check_for_link(struct e1000_hw *hw);
int32_t e1000_get_speed_and_duplex(struct e1000_hw *hw, uint16_t *speed, uint16_t *duplex);
int32_t e1000_force_mac_fc(struct e1000_hw *hw);
s32 e1000_check_for_link(struct e1000_hw *hw);
s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex);
s32 e1000_force_mac_fc(struct e1000_hw *hw);
/* PHY */
int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data);
int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
int32_t e1000_phy_reset(struct e1000_hw *hw);
int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 *phy_data);
s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 reg_addr, u16 data);
s32 e1000_phy_hw_reset(struct e1000_hw *hw);
s32 e1000_phy_reset(struct e1000_hw *hw);
s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
s32 e1000_validate_mdi_setting(struct e1000_hw *hw);
void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
/* EEPROM Functions */
int32_t e1000_init_eeprom_params(struct e1000_hw *hw);
s32 e1000_init_eeprom_params(struct e1000_hw *hw);
/* MNG HOST IF functions */
uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
u32 e1000_enable_mng_pass_thru(struct e1000_hw *hw);
#define E1000_MNG_DHCP_TX_PAYLOAD_CMD 64
#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8 /* Host Interface data length */
@ -354,80 +354,80 @@ uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
struct e1000_host_mng_command_header {
uint8_t command_id;
uint8_t checksum;
uint16_t reserved1;
uint16_t reserved2;
uint16_t command_length;
u8 command_id;
u8 checksum;
u16 reserved1;
u16 reserved2;
u16 command_length;
};
struct e1000_host_mng_command_info {
struct e1000_host_mng_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
uint8_t command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/
u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH]; /* Command data can length 0..0x658*/
};
#ifdef __BIG_ENDIAN
struct e1000_host_mng_dhcp_cookie{
uint32_t signature;
uint16_t vlan_id;
uint8_t reserved0;
uint8_t status;
uint32_t reserved1;
uint8_t checksum;
uint8_t reserved3;
uint16_t reserved2;
u32 signature;
u16 vlan_id;
u8 reserved0;
u8 status;
u32 reserved1;
u8 checksum;
u8 reserved3;
u16 reserved2;
};
#else
struct e1000_host_mng_dhcp_cookie{
uint32_t signature;
uint8_t status;
uint8_t reserved0;
uint16_t vlan_id;
uint32_t reserved1;
uint16_t reserved2;
uint8_t reserved3;
uint8_t checksum;
u32 signature;
u8 status;
u8 reserved0;
u16 vlan_id;
u32 reserved1;
u16 reserved2;
u8 reserved3;
u8 checksum;
};
#endif
int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
uint16_t length);
boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw);
int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
int32_t e1000_read_mac_addr(struct e1000_hw * hw);
s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer,
u16 length);
bool e1000_check_mng_mode(struct e1000_hw *hw);
bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
s32 e1000_read_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
s32 e1000_validate_eeprom_checksum(struct e1000_hw *hw);
s32 e1000_update_eeprom_checksum(struct e1000_hw *hw);
s32 e1000_write_eeprom(struct e1000_hw *hw, u16 reg, u16 words, u16 *data);
s32 e1000_read_mac_addr(struct e1000_hw * hw);
/* Filters (multicast, vlan, receive) */
uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
void e1000_write_vfta(struct e1000_hw *hw, uint32_t offset, uint32_t value);
u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 * mc_addr);
void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
void e1000_rar_set(struct e1000_hw *hw, u8 * mc_addr, u32 rar_index);
void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
/* LED functions */
int32_t e1000_setup_led(struct e1000_hw *hw);
int32_t e1000_cleanup_led(struct e1000_hw *hw);
int32_t e1000_led_on(struct e1000_hw *hw);
int32_t e1000_led_off(struct e1000_hw *hw);
int32_t e1000_blink_led_start(struct e1000_hw *hw);
s32 e1000_setup_led(struct e1000_hw *hw);
s32 e1000_cleanup_led(struct e1000_hw *hw);
s32 e1000_led_on(struct e1000_hw *hw);
s32 e1000_led_off(struct e1000_hw *hw);
s32 e1000_blink_led_start(struct e1000_hw *hw);
/* Adaptive IFS Functions */
/* Everything else */
void e1000_reset_adaptive(struct e1000_hw *hw);
void e1000_update_adaptive(struct e1000_hw *hw);
void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, uint32_t frame_len, uint8_t * mac_addr);
void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, u32 frame_len, u8 * mac_addr);
void e1000_get_bus_info(struct e1000_hw *hw);
void e1000_pci_set_mwi(struct e1000_hw *hw);
void e1000_pci_clear_mwi(struct e1000_hw *hw);
int32_t e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value);
s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc);
int e1000_pcix_get_mmrbc(struct e1000_hw *hw);
/* Port I/O is only supported on 82544 and newer */
void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
s32 e1000_disable_pciex_master(struct e1000_hw *hw);
s32 e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_READ_REG_IO(a, reg) \
@ -596,8 +596,8 @@ struct e1000_rx_desc {
__le64 buffer_addr; /* Address of the descriptor's data buffer */
__le16 length; /* Length of data DMAed into data buffer */
__le16 csum; /* Packet checksum */
uint8_t status; /* Descriptor status */
uint8_t errors; /* Descriptor Errors */
u8 status; /* Descriptor status */
u8 errors; /* Descriptor Errors */
__le16 special;
};
@ -718,15 +718,15 @@ struct e1000_tx_desc {
__le32 data;
struct {
__le16 length; /* Data buffer length */
uint8_t cso; /* Checksum offset */
uint8_t cmd; /* Descriptor control */
u8 cso; /* Checksum offset */
u8 cmd; /* Descriptor control */
} flags;
} lower;
union {
__le32 data;
struct {
uint8_t status; /* Descriptor status */
uint8_t css; /* Checksum start */
u8 status; /* Descriptor status */
u8 css; /* Checksum start */
__le16 special;
} fields;
} upper;
@ -759,16 +759,16 @@ struct e1000_context_desc {
union {
__le32 ip_config;
struct {
uint8_t ipcss; /* IP checksum start */
uint8_t ipcso; /* IP checksum offset */
u8 ipcss; /* IP checksum start */
u8 ipcso; /* IP checksum offset */
__le16 ipcse; /* IP checksum end */
} ip_fields;
} lower_setup;
union {
__le32 tcp_config;
struct {
uint8_t tucss; /* TCP checksum start */
uint8_t tucso; /* TCP checksum offset */
u8 tucss; /* TCP checksum start */
u8 tucso; /* TCP checksum offset */
__le16 tucse; /* TCP checksum end */
} tcp_fields;
} upper_setup;
@ -776,8 +776,8 @@ struct e1000_context_desc {
union {
__le32 data;
struct {
uint8_t status; /* Descriptor status */
uint8_t hdr_len; /* Header length */
u8 status; /* Descriptor status */
u8 hdr_len; /* Header length */
__le16 mss; /* Maximum segment size */
} fields;
} tcp_seg_setup;
@ -790,15 +790,15 @@ struct e1000_data_desc {
__le32 data;
struct {
__le16 length; /* Data buffer length */
uint8_t typ_len_ext; /* */
uint8_t cmd; /* */
u8 typ_len_ext; /* */
u8 cmd; /* */
} flags;
} lower;
union {
__le32 data;
struct {
uint8_t status; /* Descriptor status */
uint8_t popts; /* Packet Options */
u8 status; /* Descriptor status */
u8 popts; /* Packet Options */
__le16 special; /* */
} fields;
} upper;
@ -825,8 +825,8 @@ struct e1000_rar {
/* IPv4 Address Table Entry */
struct e1000_ipv4_at_entry {
volatile uint32_t ipv4_addr; /* IP Address (RW) */
volatile uint32_t reserved;
volatile u32 ipv4_addr; /* IP Address (RW) */
volatile u32 reserved;
};
/* Four wakeup IP addresses are supported */
@ -837,25 +837,25 @@ struct e1000_ipv4_at_entry {
/* IPv6 Address Table Entry */
struct e1000_ipv6_at_entry {
volatile uint8_t ipv6_addr[16];
volatile u8 ipv6_addr[16];
};
/* Flexible Filter Length Table Entry */
struct e1000_fflt_entry {
volatile uint32_t length; /* Flexible Filter Length (RW) */
volatile uint32_t reserved;
volatile u32 length; /* Flexible Filter Length (RW) */
volatile u32 reserved;
};
/* Flexible Filter Mask Table Entry */
struct e1000_ffmt_entry {
volatile uint32_t mask; /* Flexible Filter Mask (RW) */
volatile uint32_t reserved;
volatile u32 mask; /* Flexible Filter Mask (RW) */
volatile u32 reserved;
};
/* Flexible Filter Value Table Entry */
struct e1000_ffvt_entry {
volatile uint32_t value; /* Flexible Filter Value (RW) */
volatile uint32_t reserved;
volatile u32 value; /* Flexible Filter Value (RW) */
volatile u32 reserved;
};
/* Four Flexible Filters are supported */
@ -1309,89 +1309,89 @@ struct e1000_ffvt_entry {
/* Statistics counters collected by the MAC */
struct e1000_hw_stats {
uint64_t crcerrs;
uint64_t algnerrc;
uint64_t symerrs;
uint64_t rxerrc;
uint64_t txerrc;
uint64_t mpc;
uint64_t scc;
uint64_t ecol;
uint64_t mcc;
uint64_t latecol;
uint64_t colc;
uint64_t dc;
uint64_t tncrs;
uint64_t sec;
uint64_t cexterr;
uint64_t rlec;
uint64_t xonrxc;
uint64_t xontxc;
uint64_t xoffrxc;
uint64_t xofftxc;
uint64_t fcruc;
uint64_t prc64;
uint64_t prc127;
uint64_t prc255;
uint64_t prc511;
uint64_t prc1023;
uint64_t prc1522;
uint64_t gprc;
uint64_t bprc;
uint64_t mprc;
uint64_t gptc;
uint64_t gorcl;
uint64_t gorch;
uint64_t gotcl;
uint64_t gotch;
uint64_t rnbc;
uint64_t ruc;
uint64_t rfc;
uint64_t roc;
uint64_t rlerrc;
uint64_t rjc;
uint64_t mgprc;
uint64_t mgpdc;
uint64_t mgptc;
uint64_t torl;
uint64_t torh;
uint64_t totl;
uint64_t toth;
uint64_t tpr;
uint64_t tpt;
uint64_t ptc64;
uint64_t ptc127;
uint64_t ptc255;
uint64_t ptc511;
uint64_t ptc1023;
uint64_t ptc1522;
uint64_t mptc;
uint64_t bptc;
uint64_t tsctc;
uint64_t tsctfc;
uint64_t iac;
uint64_t icrxptc;
uint64_t icrxatc;
uint64_t ictxptc;
uint64_t ictxatc;
uint64_t ictxqec;
uint64_t ictxqmtc;
uint64_t icrxdmtc;
uint64_t icrxoc;
u64 crcerrs;
u64 algnerrc;
u64 symerrs;
u64 rxerrc;
u64 txerrc;
u64 mpc;
u64 scc;
u64 ecol;
u64 mcc;
u64 latecol;
u64 colc;
u64 dc;
u64 tncrs;
u64 sec;
u64 cexterr;
u64 rlec;
u64 xonrxc;
u64 xontxc;
u64 xoffrxc;
u64 xofftxc;
u64 fcruc;
u64 prc64;
u64 prc127;
u64 prc255;
u64 prc511;
u64 prc1023;
u64 prc1522;
u64 gprc;
u64 bprc;
u64 mprc;
u64 gptc;
u64 gorcl;
u64 gorch;
u64 gotcl;
u64 gotch;
u64 rnbc;
u64 ruc;
u64 rfc;
u64 roc;
u64 rlerrc;
u64 rjc;
u64 mgprc;
u64 mgpdc;
u64 mgptc;
u64 torl;
u64 torh;
u64 totl;
u64 toth;
u64 tpr;
u64 tpt;
u64 ptc64;
u64 ptc127;
u64 ptc255;
u64 ptc511;
u64 ptc1023;
u64 ptc1522;
u64 mptc;
u64 bptc;
u64 tsctc;
u64 tsctfc;
u64 iac;
u64 icrxptc;
u64 icrxatc;
u64 ictxptc;
u64 ictxatc;
u64 ictxqec;
u64 ictxqmtc;
u64 icrxdmtc;
u64 icrxoc;
};
/* Structure containing variables used by the shared code (e1000_hw.c) */
struct e1000_hw {
uint8_t __iomem *hw_addr;
uint8_t __iomem *flash_address;
u8 __iomem *hw_addr;
u8 __iomem *flash_address;
e1000_mac_type mac_type;
e1000_phy_type phy_type;
uint32_t phy_init_script;
u32 phy_init_script;
e1000_media_type media_type;
void *back;
struct e1000_shadow_ram *eeprom_shadow_ram;
uint32_t flash_bank_size;
uint32_t flash_base_addr;
u32 flash_bank_size;
u32 flash_base_addr;
e1000_fc_type fc;
e1000_bus_speed bus_speed;
e1000_bus_width bus_width;
@ -1400,75 +1400,75 @@ struct e1000_hw {
e1000_ms_type master_slave;
e1000_ms_type original_master_slave;
e1000_ffe_config ffe_config_state;
uint32_t asf_firmware_present;
uint32_t eeprom_semaphore_present;
uint32_t swfw_sync_present;
uint32_t swfwhw_semaphore_present;
u32 asf_firmware_present;
u32 eeprom_semaphore_present;
u32 swfw_sync_present;
u32 swfwhw_semaphore_present;
unsigned long io_base;
uint32_t phy_id;
uint32_t phy_revision;
uint32_t phy_addr;
uint32_t original_fc;
uint32_t txcw;
uint32_t autoneg_failed;
uint32_t max_frame_size;
uint32_t min_frame_size;
uint32_t mc_filter_type;
uint32_t num_mc_addrs;
uint32_t collision_delta;
uint32_t tx_packet_delta;
uint32_t ledctl_default;
uint32_t ledctl_mode1;
uint32_t ledctl_mode2;
boolean_t tx_pkt_filtering;
u32 phy_id;
u32 phy_revision;
u32 phy_addr;
u32 original_fc;
u32 txcw;
u32 autoneg_failed;
u32 max_frame_size;
u32 min_frame_size;
u32 mc_filter_type;
u32 num_mc_addrs;
u32 collision_delta;
u32 tx_packet_delta;
u32 ledctl_default;
u32 ledctl_mode1;
u32 ledctl_mode2;
bool tx_pkt_filtering;
struct e1000_host_mng_dhcp_cookie mng_cookie;
uint16_t phy_spd_default;
uint16_t autoneg_advertised;
uint16_t pci_cmd_word;
uint16_t fc_high_water;
uint16_t fc_low_water;
uint16_t fc_pause_time;
uint16_t current_ifs_val;
uint16_t ifs_min_val;
uint16_t ifs_max_val;
uint16_t ifs_step_size;
uint16_t ifs_ratio;
uint16_t device_id;
uint16_t vendor_id;
uint16_t subsystem_id;
uint16_t subsystem_vendor_id;
uint8_t revision_id;
uint8_t autoneg;
uint8_t mdix;
uint8_t forced_speed_duplex;
uint8_t wait_autoneg_complete;
uint8_t dma_fairness;
uint8_t mac_addr[NODE_ADDRESS_SIZE];
uint8_t perm_mac_addr[NODE_ADDRESS_SIZE];
boolean_t disable_polarity_correction;
boolean_t speed_downgraded;
u16 phy_spd_default;
u16 autoneg_advertised;
u16 pci_cmd_word;
u16 fc_high_water;
u16 fc_low_water;
u16 fc_pause_time;
u16 current_ifs_val;
u16 ifs_min_val;
u16 ifs_max_val;
u16 ifs_step_size;
u16 ifs_ratio;
u16 device_id;
u16 vendor_id;
u16 subsystem_id;
u16 subsystem_vendor_id;
u8 revision_id;
u8 autoneg;
u8 mdix;
u8 forced_speed_duplex;
u8 wait_autoneg_complete;
u8 dma_fairness;
u8 mac_addr[NODE_ADDRESS_SIZE];
u8 perm_mac_addr[NODE_ADDRESS_SIZE];
bool disable_polarity_correction;
bool speed_downgraded;
e1000_smart_speed smart_speed;
e1000_dsp_config dsp_config_state;
boolean_t get_link_status;
boolean_t serdes_link_down;
boolean_t tbi_compatibility_en;
boolean_t tbi_compatibility_on;
boolean_t laa_is_present;
boolean_t phy_reset_disable;
boolean_t initialize_hw_bits_disable;
boolean_t fc_send_xon;
boolean_t fc_strict_ieee;
boolean_t report_tx_early;
boolean_t adaptive_ifs;
boolean_t ifs_params_forced;
boolean_t in_ifs_mode;
boolean_t mng_reg_access_disabled;
boolean_t leave_av_bit_off;
boolean_t kmrn_lock_loss_workaround_disabled;
boolean_t bad_tx_carr_stats_fd;
boolean_t has_manc2h;
boolean_t rx_needs_kicking;
boolean_t has_smbus;
bool get_link_status;
bool serdes_link_down;
bool tbi_compatibility_en;
bool tbi_compatibility_on;
bool laa_is_present;
bool phy_reset_disable;
bool initialize_hw_bits_disable;
bool fc_send_xon;
bool fc_strict_ieee;
bool report_tx_early;
bool adaptive_ifs;
bool ifs_params_forced;
bool in_ifs_mode;
bool mng_reg_access_disabled;
bool leave_av_bit_off;
bool kmrn_lock_loss_workaround_disabled;
bool bad_tx_carr_stats_fd;
bool has_manc2h;
bool rx_needs_kicking;
bool has_smbus;
};
@ -2165,14 +2165,14 @@ typedef enum {
#define E1000_HI_COMMAND_TIMEOUT 500 /* Time in ms to process HI command */
struct e1000_host_command_header {
uint8_t command_id;
uint8_t command_length;
uint8_t command_options; /* I/F bits for command, status for return */
uint8_t checksum;
u8 command_id;
u8 command_length;
u8 command_options; /* I/F bits for command, status for return */
u8 checksum;
};
struct e1000_host_command_info {
struct e1000_host_command_header command_header; /* Command Head/Command Result Head has 4 bytes */
uint8_t command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */
u8 command_data[E1000_HI_MAX_DATA_LENGTH]; /* Command data can length 0..252 */
};
/* Host SMB register #0 */
@ -2495,7 +2495,7 @@ struct e1000_host_command_info {
/* Number of milliseconds we wait for PHY configuration done after MAC reset */
#define PHY_CFG_TIMEOUT 100
#define E1000_TX_BUFFER_SIZE ((uint32_t)1514)
#define E1000_TX_BUFFER_SIZE ((u32)1514)
/* The carrier extension symbol, as received by the NIC. */
#define CARRIER_EXTENSION 0x0F
@ -2518,11 +2518,11 @@ struct e1000_host_command_info {
* Typical use:
* ...
* if (TBI_ACCEPT) {
* accept_frame = TRUE;
* accept_frame = true;
* e1000_tbi_adjust_stats(adapter, MacAddress);
* frame_length--;
* } else {
* accept_frame = FALSE;
* accept_frame = false;
* }
* ...
*/
@ -3312,68 +3312,68 @@ struct e1000_host_command_info {
/* Offset 04h HSFSTS */
union ich8_hws_flash_status {
struct ich8_hsfsts {
#ifdef E1000_BIG_ENDIAN
uint16_t reserved2 :6;
uint16_t fldesvalid :1;
uint16_t flockdn :1;
uint16_t flcdone :1;
uint16_t flcerr :1;
uint16_t dael :1;
uint16_t berasesz :2;
uint16_t flcinprog :1;
uint16_t reserved1 :2;
#ifdef __BIG_ENDIAN
u16 reserved2 :6;
u16 fldesvalid :1;
u16 flockdn :1;
u16 flcdone :1;
u16 flcerr :1;
u16 dael :1;
u16 berasesz :2;
u16 flcinprog :1;
u16 reserved1 :2;
#else
uint16_t flcdone :1; /* bit 0 Flash Cycle Done */
uint16_t flcerr :1; /* bit 1 Flash Cycle Error */
uint16_t dael :1; /* bit 2 Direct Access error Log */
uint16_t berasesz :2; /* bit 4:3 Block/Sector Erase Size */
uint16_t flcinprog :1; /* bit 5 flash SPI cycle in Progress */
uint16_t reserved1 :2; /* bit 13:6 Reserved */
uint16_t reserved2 :6; /* bit 13:6 Reserved */
uint16_t fldesvalid :1; /* bit 14 Flash Descriptor Valid */
uint16_t flockdn :1; /* bit 15 Flash Configuration Lock-Down */
u16 flcdone :1; /* bit 0 Flash Cycle Done */
u16 flcerr :1; /* bit 1 Flash Cycle Error */
u16 dael :1; /* bit 2 Direct Access error Log */
u16 berasesz :2; /* bit 4:3 Block/Sector Erase Size */
u16 flcinprog :1; /* bit 5 flash SPI cycle in Progress */
u16 reserved1 :2; /* bit 13:6 Reserved */
u16 reserved2 :6; /* bit 13:6 Reserved */
u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
u16 flockdn :1; /* bit 15 Flash Configuration Lock-Down */
#endif
} hsf_status;
uint16_t regval;
u16 regval;
};
/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
/* Offset 06h FLCTL */
union ich8_hws_flash_ctrl {
struct ich8_hsflctl {
#ifdef E1000_BIG_ENDIAN
uint16_t fldbcount :2;
uint16_t flockdn :6;
uint16_t flcgo :1;
uint16_t flcycle :2;
uint16_t reserved :5;
#ifdef __BIG_ENDIAN
u16 fldbcount :2;
u16 flockdn :6;
u16 flcgo :1;
u16 flcycle :2;
u16 reserved :5;
#else
uint16_t flcgo :1; /* 0 Flash Cycle Go */
uint16_t flcycle :2; /* 2:1 Flash Cycle */
uint16_t reserved :5; /* 7:3 Reserved */
uint16_t fldbcount :2; /* 9:8 Flash Data Byte Count */
uint16_t flockdn :6; /* 15:10 Reserved */
u16 flcgo :1; /* 0 Flash Cycle Go */
u16 flcycle :2; /* 2:1 Flash Cycle */
u16 reserved :5; /* 7:3 Reserved */
u16 fldbcount :2; /* 9:8 Flash Data Byte Count */
u16 flockdn :6; /* 15:10 Reserved */
#endif
} hsf_ctrl;
uint16_t regval;
u16 regval;
};
/* ICH8 Flash Region Access Permissions */
union ich8_hws_flash_regacc {
struct ich8_flracc {
#ifdef E1000_BIG_ENDIAN
uint32_t gmwag :8;
uint32_t gmrag :8;
uint32_t grwa :8;
uint32_t grra :8;
#ifdef __BIG_ENDIAN
u32 gmwag :8;
u32 gmrag :8;
u32 grwa :8;
u32 grra :8;
#else
uint32_t grra :8; /* 0:7 GbE region Read Access */
uint32_t grwa :8; /* 8:15 GbE region Write Access */
uint32_t gmrag :8; /* 23:16 GbE Master Read Access Grant */
uint32_t gmwag :8; /* 31:24 GbE Master Write Access Grant */
u32 grra :8; /* 0:7 GbE region Read Access */
u32 grwa :8; /* 8:15 GbE region Write Access */
u32 gmrag :8; /* 23:16 GbE Master Read Access Grant */
u32 gmwag :8; /* 31:24 GbE Master Write Access Grant */
#endif
} hsf_flregacc;
uint16_t regval;
u16 regval;
};
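/*
 * Illustrative sketch, not part of this commit: each union above overlays a
 * bit-field view on the raw register value, so the driver can read a flash
 * register once into .regval, adjust individual fields through the struct,
 * and write the whole value back.  read_flash_reg16()/write_flash_reg16()
 * are hypothetical accessors standing in for the driver's ICH8 flash helpers.
 */
static void start_flash_cycle_sketch(struct e1000_hw *hw)
{
	union ich8_hws_flash_ctrl hsflctl;

	hsflctl.regval = read_flash_reg16(hw, 0x06);	/* HSFCTL lives at offset 06h */
	hsflctl.hsf_ctrl.fldbcount = 1;			/* byte count - 1, i.e. two data bytes */
	hsflctl.hsf_ctrl.flcgo = 1;			/* start the flash cycle */
	write_flash_reg16(hw, 0x06, hsflctl.regval);
}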
/* Miscellaneous PHY bit definitions. */

View file

@ -127,7 +127,7 @@ int e1000_up(struct e1000_adapter *adapter);
void e1000_down(struct e1000_adapter *adapter);
void e1000_reinit_locked(struct e1000_adapter *adapter);
void e1000_reset(struct e1000_adapter *adapter);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
@ -169,21 +169,21 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
static int e1000_set_mac(struct net_device *netdev, void *p);
static irqreturn_t e1000_intr(int irq, void *data);
static irqreturn_t e1000_intr_msi(int irq, void *data);
static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring);
static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring);
#ifdef CONFIG_E1000_NAPI
static int e1000_clean(struct napi_struct *napi, int budget);
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
int *work_done, int work_to_do);
#else
static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring);
static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring);
static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring);
static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring);
#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
@ -203,8 +203,8 @@ static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
struct sk_buff *skb);
static void e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void e1000_restore_vlan(struct e1000_adapter *adapter);
static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
@ -347,7 +347,6 @@ static void e1000_free_irq(struct e1000_adapter *adapter)
static void
e1000_irq_disable(struct e1000_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
E1000_WRITE_REG(&adapter->hw, IMC, ~0);
E1000_WRITE_FLUSH(&adapter->hw);
synchronize_irq(adapter->pdev->irq);
@ -361,18 +360,16 @@ e1000_irq_disable(struct e1000_adapter *adapter)
static void
e1000_irq_enable(struct e1000_adapter *adapter)
{
if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
E1000_WRITE_FLUSH(&adapter->hw);
}
E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
E1000_WRITE_FLUSH(&adapter->hw);
}
static void
e1000_update_mng_vlan(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
uint16_t vid = adapter->hw.mng_cookie.vlan_id;
uint16_t old_vid = adapter->mng_vlan_id;
u16 vid = adapter->hw.mng_cookie.vlan_id;
u16 old_vid = adapter->mng_vlan_id;
if (adapter->vlgrp) {
if (!vlan_group_get_device(adapter->vlgrp, vid)) {
if (adapter->hw.mng_cookie.status &
@ -382,7 +379,7 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
} else
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
if ((old_vid != (uint16_t)E1000_MNG_VLAN_NONE) &&
if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
(vid != old_vid) &&
!vlan_group_get_device(adapter->vlgrp, old_vid))
e1000_vlan_rx_kill_vid(netdev, old_vid);
@ -405,8 +402,8 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
static void
e1000_release_hw_control(struct e1000_adapter *adapter)
{
uint32_t ctrl_ext;
uint32_t swsm;
u32 ctrl_ext;
u32 swsm;
/* Let firmware taken over control of h/w */
switch (adapter->hw.mac_type) {
@ -442,8 +439,8 @@ e1000_release_hw_control(struct e1000_adapter *adapter)
static void
e1000_get_hw_control(struct e1000_adapter *adapter)
{
uint32_t ctrl_ext;
uint32_t swsm;
u32 ctrl_ext;
u32 swsm;
/* Let firmware know the driver has taken over */
switch (adapter->hw.mac_type) {
@ -469,7 +466,7 @@ static void
e1000_init_manageability(struct e1000_adapter *adapter)
{
if (adapter->en_mng_pt) {
uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);
u32 manc = E1000_READ_REG(&adapter->hw, MANC);
/* disable hardware interception of ARP */
manc &= ~(E1000_MANC_ARP_EN);
@ -478,7 +475,7 @@ e1000_init_manageability(struct e1000_adapter *adapter)
/* this will probably generate destination unreachable messages
* from the host OS, but the packets will be handled on SMBUS */
if (adapter->hw.has_manc2h) {
uint32_t manc2h = E1000_READ_REG(&adapter->hw, MANC2H);
u32 manc2h = E1000_READ_REG(&adapter->hw, MANC2H);
manc |= E1000_MANC_EN_MNG2HOST;
#define E1000_MNG2HOST_PORT_623 (1 << 5)
@ -496,7 +493,7 @@ static void
e1000_release_manageability(struct e1000_adapter *adapter)
{
if (adapter->en_mng_pt) {
uint32_t manc = E1000_READ_REG(&adapter->hw, MANC);
u32 manc = E1000_READ_REG(&adapter->hw, MANC);
/* re-enable hardware interception of ARP */
manc |= E1000_MANC_ARP_EN;
@ -569,7 +566,7 @@ int e1000_up(struct e1000_adapter *adapter)
void e1000_power_up_phy(struct e1000_adapter *adapter)
{
uint16_t mii_reg = 0;
u16 mii_reg = 0;
/* Just clear the power down bit to wake the phy back up */
if (adapter->hw.media_type == e1000_media_type_copper) {
@ -584,13 +581,13 @@ void e1000_power_up_phy(struct e1000_adapter *adapter)
static void e1000_power_down_phy(struct e1000_adapter *adapter)
{
/* Power down the PHY so no link is implied when interface is down *
* The PHY cannot be powered down if any of the following is TRUE *
* The PHY cannot be powered down if any of the following is true *
* (a) WoL is enabled
* (b) AMT is active
* (c) SoL/IDER session is active */
if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
adapter->hw.media_type == e1000_media_type_copper) {
uint16_t mii_reg = 0;
u16 mii_reg = 0;
switch (adapter->hw.mac_type) {
case e1000_82540:
@ -638,7 +635,6 @@ e1000_down(struct e1000_adapter *adapter)
#ifdef CONFIG_E1000_NAPI
napi_disable(&adapter->napi);
atomic_set(&adapter->irq_sem, 0);
#endif
e1000_irq_disable(adapter);
@ -671,9 +667,9 @@ e1000_reinit_locked(struct e1000_adapter *adapter)
void
e1000_reset(struct e1000_adapter *adapter)
{
uint32_t pba = 0, tx_space, min_tx_space, min_rx_space;
uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
boolean_t legacy_pba_adjust = FALSE;
u32 pba = 0, tx_space, min_tx_space, min_rx_space;
u16 fc_high_water_mark = E1000_FC_HIGH_DIFF;
bool legacy_pba_adjust = false;
/* Repartition Pba for greater than 9k mtu
* To take effect CTRL.RST is required.
@ -687,7 +683,7 @@ e1000_reset(struct e1000_adapter *adapter)
case e1000_82540:
case e1000_82541:
case e1000_82541_rev_2:
legacy_pba_adjust = TRUE;
legacy_pba_adjust = true;
pba = E1000_PBA_48K;
break;
case e1000_82545:
@ -698,7 +694,7 @@ e1000_reset(struct e1000_adapter *adapter)
break;
case e1000_82547:
case e1000_82547_rev_2:
legacy_pba_adjust = TRUE;
legacy_pba_adjust = true;
pba = E1000_PBA_30K;
break;
case e1000_82571:
@ -716,7 +712,7 @@ e1000_reset(struct e1000_adapter *adapter)
break;
}
if (legacy_pba_adjust == TRUE) {
if (legacy_pba_adjust) {
if (adapter->netdev->mtu > E1000_RXBUFFER_8192)
pba -= 8; /* allocate more FIFO for Tx */
@ -819,7 +815,7 @@ e1000_reset(struct e1000_adapter *adapter)
adapter->hw.mac_type <= e1000_82547_rev_2 &&
adapter->hw.autoneg == 1 &&
adapter->hw.autoneg_advertised == ADVERTISE_1000_FULL) {
uint32_t ctrl = E1000_READ_REG(&adapter->hw, CTRL);
u32 ctrl = E1000_READ_REG(&adapter->hw, CTRL);
/* clear phy power management bit if we are in gig only mode,
* which if enabled will attempt negotiation to 100Mb, which
* can cause a loss of link at power off or driver unload */
@ -836,7 +832,7 @@ e1000_reset(struct e1000_adapter *adapter)
if (!adapter->smart_power_down &&
(adapter->hw.mac_type == e1000_82571 ||
adapter->hw.mac_type == e1000_82572)) {
uint16_t phy_data = 0;
u16 phy_data = 0;
/* speed up time to link by disabling smart power down, ignore
* the return value of this function because there is nothing
* different we would do if it failed */
@ -930,8 +926,8 @@ e1000_probe(struct pci_dev *pdev,
static int cards_found = 0;
static int global_quad_port_a = 0; /* global ksp3 port a indication */
int i, err, pci_using_dac;
uint16_t eeprom_data = 0;
uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
u16 eeprom_data = 0;
u16 eeprom_apme_mask = E1000_EEPROM_APME;
DECLARE_MAC_BUF(mac);
if ((err = pci_enable_device(pdev)))
@ -1366,15 +1362,15 @@ e1000_sw_init(struct e1000_adapter *adapter)
e1000_set_media_type(hw);
hw->wait_autoneg_complete = FALSE;
hw->tbi_compatibility_en = TRUE;
hw->adaptive_ifs = TRUE;
hw->wait_autoneg_complete = false;
hw->tbi_compatibility_en = true;
hw->adaptive_ifs = true;
/* Copper options */
if (hw->media_type == e1000_media_type_copper) {
hw->mdix = AUTO_ALL_MODES;
hw->disable_polarity_correction = FALSE;
hw->disable_polarity_correction = false;
hw->master_slave = E1000_MASTER_SLAVE;
}
@ -1396,7 +1392,6 @@ e1000_sw_init(struct e1000_adapter *adapter)
#endif
/* Explicitly disable IRQ since the NIC can be in any state. */
atomic_set(&adapter->irq_sem, 0);
e1000_irq_disable(adapter);
spin_lock_init(&adapter->stats_lock);
@ -1576,7 +1571,7 @@ e1000_close(struct net_device *netdev)
* @start: address of beginning of memory
* @len: length of memory
**/
static boolean_t
static bool
e1000_check_64k_bound(struct e1000_adapter *adapter,
void *start, unsigned long len)
{
@ -1587,10 +1582,10 @@ e1000_check_64k_bound(struct e1000_adapter *adapter,
* write location to cross 64k boundary due to errata 23 */
if (adapter->hw.mac_type == e1000_82545 ||
adapter->hw.mac_type == e1000_82546) {
return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
}
return TRUE;
return true;
}
/**
@ -1707,10 +1702,10 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
static void
e1000_configure_tx(struct e1000_adapter *adapter)
{
uint64_t tdba;
u64 tdba;
struct e1000_hw *hw = &adapter->hw;
uint32_t tdlen, tctl, tipg, tarc;
uint32_t ipgr1, ipgr2;
u32 tdlen, tctl, tipg, tarc;
u32 ipgr1, ipgr2;
/* Setup the HW Tx Head and Tail descriptor pointers */
@ -1952,10 +1947,10 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
static void
e1000_setup_rctl(struct e1000_adapter *adapter)
{
uint32_t rctl, rfctl;
uint32_t psrctl = 0;
u32 rctl, rfctl;
u32 psrctl = 0;
#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
uint32_t pages = 0;
u32 pages = 0;
#endif
rctl = E1000_READ_REG(&adapter->hw, RCTL);
@ -2070,9 +2065,9 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
static void
e1000_configure_rx(struct e1000_adapter *adapter)
{
uint64_t rdba;
u64 rdba;
struct e1000_hw *hw = &adapter->hw;
uint32_t rdlen, rctl, rxcsum, ctrl_ext;
u32 rdlen, rctl, rxcsum, ctrl_ext;
if (adapter->rx_ps_pages) {
/* this is a 32 byte descriptor */
@ -2133,7 +2128,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
/* Enable 82543 Receive Checksum Offload for TCP and UDP */
if (hw->mac_type >= e1000_82543) {
rxcsum = E1000_READ_REG(hw, RXCSUM);
if (adapter->rx_csum == TRUE) {
if (adapter->rx_csum) {
rxcsum |= E1000_RXCSUM_TUOFL;
/* Enable 82571 IPv4 payload checksum for UDP fragments
@ -2392,7 +2387,7 @@ static void
e1000_enter_82542_rst(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
uint32_t rctl;
u32 rctl;
e1000_pci_clear_mwi(&adapter->hw);
@ -2410,7 +2405,7 @@ static void
e1000_leave_82542_rst(struct e1000_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
uint32_t rctl;
u32 rctl;
rctl = E1000_READ_REG(&adapter->hw, RCTL);
rctl &= ~E1000_RCTL_RST;
@ -2495,8 +2490,8 @@ e1000_set_rx_mode(struct net_device *netdev)
struct e1000_hw *hw = &adapter->hw;
struct dev_addr_list *uc_ptr;
struct dev_addr_list *mc_ptr;
uint32_t rctl;
uint32_t hash_value;
u32 rctl;
u32 hash_value;
int i, rar_entries = E1000_RAR_ENTRIES;
int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
E1000_NUM_MTA_REGISTERS_ICH8LAN :
@ -2600,7 +2595,7 @@ e1000_82547_tx_fifo_stall(unsigned long data)
{
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
struct net_device *netdev = adapter->netdev;
uint32_t tctl;
u32 tctl;
if (atomic_read(&adapter->tx_fifo_stall)) {
if ((E1000_READ_REG(&adapter->hw, TDT) ==
@ -2642,8 +2637,8 @@ e1000_watchdog(unsigned long data)
struct e1000_adapter *adapter = (struct e1000_adapter *) data;
struct net_device *netdev = adapter->netdev;
struct e1000_tx_ring *txdr = adapter->tx_ring;
uint32_t link, tctl;
int32_t ret_val;
u32 link, tctl;
s32 ret_val;
ret_val = e1000_check_for_link(&adapter->hw);
if ((ret_val == E1000_ERR_PHY) &&
@ -2668,8 +2663,8 @@ e1000_watchdog(unsigned long data)
if (link) {
if (!netif_carrier_ok(netdev)) {
uint32_t ctrl;
boolean_t txb2b = 1;
u32 ctrl;
bool txb2b = true;
e1000_get_speed_and_duplex(&adapter->hw,
&adapter->link_speed,
&adapter->link_duplex);
@ -2691,12 +2686,12 @@ e1000_watchdog(unsigned long data)
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
txb2b = 0;
txb2b = false;
netdev->tx_queue_len = 10;
adapter->tx_timeout_factor = 8;
break;
case SPEED_100:
txb2b = 0;
txb2b = false;
netdev->tx_queue_len = 100;
/* maybe add some timeout factor ? */
break;
@ -2704,8 +2699,8 @@ e1000_watchdog(unsigned long data)
if ((adapter->hw.mac_type == e1000_82571 ||
adapter->hw.mac_type == e1000_82572) &&
txb2b == 0) {
uint32_t tarc0;
!txb2b) {
u32 tarc0;
tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
tarc0 &= ~(1 << 21);
E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
@ -2747,7 +2742,7 @@ e1000_watchdog(unsigned long data)
/* make sure the receive unit is started */
if (adapter->hw.rx_needs_kicking) {
struct e1000_hw *hw = &adapter->hw;
uint32_t rctl = E1000_READ_REG(hw, RCTL);
u32 rctl = E1000_READ_REG(hw, RCTL);
E1000_WRITE_REG(hw, RCTL, rctl | E1000_RCTL_EN);
}
}
@ -2802,7 +2797,7 @@ e1000_watchdog(unsigned long data)
E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = TRUE;
adapter->detect_tx_hung = true;
/* With 82571 controllers, LAA may be overwritten due to controller
* reset from the other port. Set the appropriate LAA in RAR[0] */
@ -2837,7 +2832,7 @@ enum latency_range {
* @bytes: the number of bytes during this measurement interval
**/
static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
uint16_t itr_setting,
u16 itr_setting,
int packets,
int bytes)
{
@ -2889,8 +2884,8 @@ update_itr_done:
static void e1000_set_itr(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
uint16_t current_itr;
uint32_t new_itr = adapter->itr;
u16 current_itr;
u32 new_itr = adapter->itr;
if (unlikely(hw->mac_type < e1000_82540))
return;
@ -2964,9 +2959,9 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
unsigned int i;
uint32_t cmd_length = 0;
uint16_t ipcse = 0, tucse, mss;
uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
u32 cmd_length = 0;
u16 ipcse = 0, tucse, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
int err;
if (skb_is_gso(skb)) {
@ -3025,19 +3020,19 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
if (++i == tx_ring->count) i = 0;
tx_ring->next_to_use = i;
return TRUE;
return true;
}
return FALSE;
return false;
}
static boolean_t
static bool
e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
struct sk_buff *skb)
{
struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
unsigned int i;
uint8_t css;
u8 css;
if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
css = skb_transport_offset(skb);
@ -3060,10 +3055,10 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
if (unlikely(++i == tx_ring->count)) i = 0;
tx_ring->next_to_use = i;
return TRUE;
return true;
}
return FALSE;
return false;
}
#define E1000_MAX_TXD_PWR 12
@ -3182,7 +3177,7 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
{
struct e1000_tx_desc *tx_desc = NULL;
struct e1000_buffer *buffer_info;
uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
unsigned int i;
if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
@ -3246,8 +3241,8 @@ e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
static int
e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct sk_buff *skb)
{
uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
@ -3274,7 +3269,7 @@ static int
e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
{
struct e1000_hw *hw = &adapter->hw;
uint16_t length, offset;
u16 length, offset;
if (vlan_tx_tag_present(skb)) {
if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
( adapter->hw.mng_cookie.status &
@ -3285,17 +3280,17 @@ e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct sk_buff *skb)
struct ethhdr *eth = (struct ethhdr *) skb->data;
if ((htons(ETH_P_IP) == eth->h_proto)) {
const struct iphdr *ip =
(struct iphdr *)((uint8_t *)skb->data+14);
(struct iphdr *)((u8 *)skb->data+14);
if (IPPROTO_UDP == ip->protocol) {
struct udphdr *udp =
(struct udphdr *)((uint8_t *)ip +
(struct udphdr *)((u8 *)ip +
(ip->ihl << 2));
if (ntohs(udp->dest) == 67) {
offset = (uint8_t *)udp + 8 - skb->data;
offset = (u8 *)udp + 8 - skb->data;
length = skb->len - offset;
return e1000_mng_write_dhcp_info(hw,
(uint8_t *)udp + 8,
(u8 *)udp + 8,
length);
}
}
@ -3375,7 +3370,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
* overrun the FIFO, adjust the max buffer len if mss
* drops. */
if (mss) {
uint8_t hdr_len;
u8 hdr_len;
max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1;
@ -3562,7 +3557,7 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
uint16_t eeprom_data = 0;
u16 eeprom_data = 0;
if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) {
@ -3657,7 +3652,7 @@ e1000_update_stats(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
unsigned long flags;
uint16_t phy_tmp;
u16 phy_tmp;
#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
@ -3834,13 +3829,10 @@ e1000_intr_msi(int irq, void *data)
#ifndef CONFIG_E1000_NAPI
int i;
#endif
uint32_t icr = E1000_READ_REG(hw, ICR);
u32 icr = E1000_READ_REG(hw, ICR);
/* in NAPI mode read ICR disables interrupts using IAM */
#ifdef CONFIG_E1000_NAPI
/* read ICR disables interrupts using IAM, so keep up with our
* enable/disable accounting */
atomic_inc(&adapter->irq_sem);
#endif
if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
hw->get_link_status = 1;
/* 80003ES2LAN workaround-- For packet buffer work-around on
@ -3849,7 +3841,7 @@ e1000_intr_msi(int irq, void *data)
if (netif_carrier_ok(netdev) &&
(adapter->hw.mac_type == e1000_80003es2lan)) {
/* disable receives */
uint32_t rctl = E1000_READ_REG(hw, RCTL);
u32 rctl = E1000_READ_REG(hw, RCTL);
E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
}
/* guard against interrupt when we're going down */
@ -3896,7 +3888,7 @@ e1000_intr(int irq, void *data)
struct net_device *netdev = data;
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
u32 rctl, icr = E1000_READ_REG(hw, ICR);
#ifndef CONFIG_E1000_NAPI
int i;
#endif
@ -3910,12 +3902,8 @@ e1000_intr(int irq, void *data)
!(icr & E1000_ICR_INT_ASSERTED)))
return IRQ_NONE;
/* Interrupt Auto-Mask...upon reading ICR,
* interrupts are masked. No need for the
* IMC write, but it does mean we should
* account for it ASAP. */
if (likely(hw->mac_type >= e1000_82571))
atomic_inc(&adapter->irq_sem);
/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
* need for the IMC write */
#endif
if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
@ -3939,7 +3927,6 @@ e1000_intr(int irq, void *data)
#ifdef CONFIG_E1000_NAPI
if (unlikely(hw->mac_type < e1000_82571)) {
/* disable interrupts, without the synchronize_irq bit */
atomic_inc(&adapter->irq_sem);
E1000_WRITE_REG(hw, IMC, ~0);
E1000_WRITE_FLUSH(hw);
}
@ -3964,10 +3951,8 @@ e1000_intr(int irq, void *data)
* in dead lock. Writing IMC forces 82547 into
* de-assertion state.
*/
if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
atomic_inc(&adapter->irq_sem);
if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
E1000_WRITE_REG(hw, IMC, ~0);
}
adapter->total_tx_bytes = 0;
adapter->total_rx_bytes = 0;
@ -4038,7 +4023,7 @@ e1000_clean(struct napi_struct *napi, int budget)
* @adapter: board private structure
**/
static boolean_t
static bool
e1000_clean_tx_irq(struct e1000_adapter *adapter,
struct e1000_tx_ring *tx_ring)
{
@ -4049,7 +4034,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
#ifdef CONFIG_E1000_NAPI
unsigned int count = 0;
#endif
boolean_t cleaned = FALSE;
bool cleaned = false;
unsigned int total_tx_bytes=0, total_tx_packets=0;
i = tx_ring->next_to_clean;
@ -4057,7 +4042,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
eop_desc = E1000_TX_DESC(*tx_ring, eop);
while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
for (cleaned = FALSE; !cleaned; ) {
for (cleaned = false; !cleaned; ) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop);
@ -4105,7 +4090,7 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
if (adapter->detect_tx_hung) {
/* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */
adapter->detect_tx_hung = FALSE;
adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[eop].dma &&
time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
(adapter->tx_timeout_factor * HZ))
@ -4154,11 +4139,11 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
static void
e1000_rx_checksum(struct e1000_adapter *adapter,
uint32_t status_err, uint32_t csum,
u32 status_err, u32 csum,
struct sk_buff *skb)
{
uint16_t status = (uint16_t)status_err;
uint8_t errors = (uint8_t)(status_err >> 24);
u16 status = (u16)status_err;
u8 errors = (u8)(status_err >> 24);
skb->ip_summed = CHECKSUM_NONE;
/* 82543 or newer only */
@ -4200,7 +4185,7 @@ e1000_rx_checksum(struct e1000_adapter *adapter,
* @adapter: board private structure
**/
static boolean_t
static bool
#ifdef CONFIG_E1000_NAPI
e1000_clean_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
@ -4215,11 +4200,11 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
struct e1000_rx_desc *rx_desc, *next_rxd;
struct e1000_buffer *buffer_info, *next_buffer;
unsigned long flags;
uint32_t length;
uint8_t last_byte;
u32 length;
u8 last_byte;
unsigned int i;
int cleaned_count = 0;
boolean_t cleaned = FALSE;
bool cleaned = false;
unsigned int total_rx_bytes=0, total_rx_packets=0;
i = rx_ring->next_to_clean;
@ -4247,7 +4232,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
next_buffer = &rx_ring->buffer_info[i];
cleaned = TRUE;
cleaned = true;
cleaned_count++;
pci_unmap_single(pdev,
buffer_info->dma,
@ -4316,8 +4301,8 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
/* Receive Checksum Offload */
e1000_rx_checksum(adapter,
(uint32_t)(status) |
((uint32_t)(rx_desc->errors) << 24),
(u32)(status) |
((u32)(rx_desc->errors) << 24),
le16_to_cpu(rx_desc->csum), skb);
skb->protocol = eth_type_trans(skb, netdev);
@ -4373,7 +4358,7 @@ next_desc:
* @adapter: board private structure
**/
static boolean_t
static bool
#ifdef CONFIG_E1000_NAPI
e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring,
@ -4391,9 +4376,9 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
struct e1000_ps_page_dma *ps_page_dma;
struct sk_buff *skb;
unsigned int i, j;
uint32_t length, staterr;
u32 length, staterr;
int cleaned_count = 0;
boolean_t cleaned = FALSE;
bool cleaned = false;
unsigned int total_rx_bytes=0, total_rx_packets=0;
i = rx_ring->next_to_clean;
@ -4420,7 +4405,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
next_buffer = &rx_ring->buffer_info[i];
cleaned = TRUE;
cleaned = true;
cleaned_count++;
pci_unmap_single(pdev, buffer_info->dma,
buffer_info->length,
@ -4774,8 +4759,8 @@ no_buffers:
static void
e1000_smartspeed(struct e1000_adapter *adapter)
{
uint16_t phy_status;
uint16_t phy_ctrl;
u16 phy_status;
u16 phy_ctrl;
if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
!(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
@ -4854,8 +4839,8 @@ e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
struct e1000_adapter *adapter = netdev_priv(netdev);
struct mii_ioctl_data *data = if_mii(ifr);
int retval;
uint16_t mii_reg;
uint16_t spddplx;
u16 mii_reg;
u16 spddplx;
unsigned long flags;
if (adapter->hw.media_type != e1000_media_type_copper)
@ -4974,11 +4959,11 @@ e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
pcix_set_mmrbc(adapter->pdev, mmrbc);
}
int32_t
e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
s32
e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
{
struct e1000_adapter *adapter = hw->back;
uint16_t cap_offset;
u16 cap_offset;
cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
if (!cap_offset)
@ -4990,7 +4975,7 @@ e1000_read_pcie_cap_reg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
}
void
e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
{
outl(value, port);
}
@ -4999,9 +4984,10 @@ static void
e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
uint32_t ctrl, rctl;
u32 ctrl, rctl;
e1000_irq_disable(adapter);
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_disable(adapter);
adapter->vlgrp = grp;
if (grp) {
@ -5030,7 +5016,7 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
rctl &= ~E1000_RCTL_VFE;
E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
if (adapter->mng_vlan_id !=
(uint16_t)E1000_MNG_VLAN_NONE) {
(u16)E1000_MNG_VLAN_NONE) {
e1000_vlan_rx_kill_vid(netdev,
adapter->mng_vlan_id);
adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
@ -5038,14 +5024,15 @@ e1000_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
}
}
e1000_irq_enable(adapter);
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
}
static void
e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
uint32_t vfta, index;
u32 vfta, index;
if ((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
@ -5059,14 +5046,16 @@ e1000_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
}
static void
e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
uint32_t vfta, index;
u32 vfta, index;
e1000_irq_disable(adapter);
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_disable(adapter);
vlan_group_set_device(adapter->vlgrp, vid, NULL);
e1000_irq_enable(adapter);
if (!test_bit(__E1000_DOWN, &adapter->flags))
e1000_irq_enable(adapter);
if ((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
@ -5089,7 +5078,7 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
if (adapter->vlgrp) {
uint16_t vid;
u16 vid;
for (vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
if (!vlan_group_get_device(adapter->vlgrp, vid))
continue;
@ -5099,7 +5088,7 @@ e1000_restore_vlan(struct e1000_adapter *adapter)
}
int
e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
{
adapter->hw.autoneg = 0;
@ -5140,8 +5129,8 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
uint32_t ctrl, ctrl_ext, rctl, status;
uint32_t wufc = adapter->wol;
u32 ctrl, ctrl_ext, rctl, status;
u32 wufc = adapter->wol;
#ifdef CONFIG_PM
int retval = 0;
#endif
@ -5238,7 +5227,7 @@ e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
uint32_t err;
u32 err;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);

View file

@ -41,13 +41,6 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
typedef enum {
#undef FALSE
FALSE = 0,
#undef TRUE
TRUE = 1
} boolean_t;
#ifdef DBG
#define DEBUGOUT(S) printk(KERN_DEBUG S "\n")
#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S "\n", A)

View file

@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2007 Intel Corporation.
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@ -29,6 +29,9 @@
/*
* 82571EB Gigabit Ethernet Controller
* 82571EB Gigabit Ethernet Controller (Fiber)
* 82571EB Dual Port Gigabit Mezzanine Adapter
* 82571EB Quad Port Gigabit Mezzanine Adapter
* 82571PT Gigabit PT Quad Port Server ExpressModule
* 82572EI Gigabit Ethernet Controller (Copper)
* 82572EI Gigabit Ethernet Controller (Fiber)
* 82572EI Gigabit Ethernet Controller
@ -72,7 +75,7 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
if (hw->media_type != e1000_media_type_copper) {
if (hw->phy.media_type != e1000_media_type_copper) {
phy->type = e1000_phy_none;
return 0;
}
@ -150,7 +153,8 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
if (((eecd >> 15) & 0x3) == 0x3) {
nvm->type = e1000_nvm_flash_hw;
nvm->word_size = 2048;
/* Autonomous Flash update bit must be cleared due
/*
* Autonomous Flash update bit must be cleared due
* to Flash update issue.
*/
eecd &= ~E1000_EECD_AUPDEN;
@ -159,13 +163,18 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
}
/* Fall Through */
default:
nvm->type = e1000_nvm_eeprom_spi;
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
/* Added to a constant, "size" becomes the left-shift value
/*
* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
nvm->word_size = 1 << size;
break;
}
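/*
 * Worked example (not part of this commit), assuming NVM_WORD_SIZE_BASE_SHIFT
 * is 6 as in the e1000e headers: an EECD size field of 2 gives
 * size = 2 + 6 = 8, so word_size = 1 << 8 = 256 words; field values above 8
 * are clamped by the "size > 14" check, capping word_size at 1 << 14 = 16384
 * words, i.e. the 16k limit mentioned in the comment above.
 */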
@ -190,16 +199,16 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
case E1000_DEV_ID_82571EB_FIBER:
case E1000_DEV_ID_82572EI_FIBER:
case E1000_DEV_ID_82571EB_QUAD_FIBER:
hw->media_type = e1000_media_type_fiber;
hw->phy.media_type = e1000_media_type_fiber;
break;
case E1000_DEV_ID_82571EB_SERDES:
case E1000_DEV_ID_82572EI_SERDES:
case E1000_DEV_ID_82571EB_SERDES_DUAL:
case E1000_DEV_ID_82571EB_SERDES_QUAD:
hw->media_type = e1000_media_type_internal_serdes;
hw->phy.media_type = e1000_media_type_internal_serdes;
break;
default:
hw->media_type = e1000_media_type_copper;
hw->phy.media_type = e1000_media_type_copper;
break;
}
@ -208,25 +217,28 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES;
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid =
(er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
/* check for link */
switch (hw->media_type) {
switch (hw->phy.media_type) {
case e1000_media_type_copper:
func->setup_physical_interface = e1000_setup_copper_link_82571;
func->check_for_link = e1000e_check_for_copper_link;
func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
break;
case e1000_media_type_fiber:
func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571;
func->setup_physical_interface =
e1000_setup_fiber_serdes_link_82571;
func->check_for_link = e1000e_check_for_fiber_link;
func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes;
func->get_link_up_info =
e1000e_get_speed_and_duplex_fiber_serdes;
break;
case e1000_media_type_internal_serdes:
func->setup_physical_interface = e1000_setup_fiber_serdes_link_82571;
func->setup_physical_interface =
e1000_setup_fiber_serdes_link_82571;
func->check_for_link = e1000e_check_for_serdes_link;
func->get_link_up_info = e1000e_get_speed_and_duplex_fiber_serdes;
func->get_link_up_info =
e1000e_get_speed_and_duplex_fiber_serdes;
break;
default:
return -E1000_ERR_CONFIG;
@ -236,7 +248,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
return 0;
}
static s32 e1000_get_invariants_82571(struct e1000_adapter *adapter)
static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
static int global_quad_port_a; /* global port a indication */
@ -322,10 +334,12 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
/* The 82571 firmware may still be configuring the PHY.
/*
* The 82571 firmware may still be configuring the PHY.
* In this case, we cannot access the PHY until the
* configuration is done. So we explicitly set the
* PHY ID. */
* PHY ID.
*/
phy->id = IGP01E1000_I_PHY_ID;
break;
case e1000_82573:
@ -479,8 +493,10 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* If our nvm is an EEPROM, then we're done
* otherwise, commit the checksum to the flash NVM. */
/*
* If our nvm is an EEPROM, then we're done
* otherwise, commit the checksum to the flash NVM.
*/
if (hw->nvm.type != e1000_nvm_flash_hw)
return ret_val;
@ -496,7 +512,8 @@ static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
/* Reset the firmware if using STM opcode. */
if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
/* The enabling of and the actual reset must be done
/*
* The enabling of and the actual reset must be done
* in two write cycles.
*/
ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
@ -557,8 +574,10 @@ static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
u32 eewr = 0;
s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* and not enough words. */
/*
* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
hw_dbg(hw, "nvm parameter(s) out of bounds\n");
@ -645,30 +664,32 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
} else {
data &= ~IGP02E1000_PM_D0_LPLU;
ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
/*
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained. */
* SmartSpeed, so performance is maintained.
*/
if (phy->smart_speed == e1000_smart_speed_on) {
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
&data);
if (ret_val)
return ret_val;
data |= IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
data);
if (ret_val)
return ret_val;
} else if (phy->smart_speed == e1000_smart_speed_off) {
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
&data);
if (ret_val)
return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
data);
if (ret_val)
return ret_val;
}
@ -693,7 +714,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
s32 ret_val;
u16 i = 0;
/* Prevent the PCI-E bus from sticking if there is no TLP connection
/*
* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
@ -709,8 +731,10 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
msleep(10);
/* Must acquire the MDIO ownership before MAC reset.
* Ownership defaults to firmware after a reset. */
/*
* Must acquire the MDIO ownership before MAC reset.
* Ownership defaults to firmware after a reset.
*/
if (hw->mac.type == e1000_82573) {
extcnf_ctrl = er32(EXTCNF_CTRL);
extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
@ -747,7 +771,8 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
/* We don't want to continue accessing MAC registers. */
return ret_val;
/* Phy configuration from NVM just starts after EECD_AUTO_RD is set.
/*
* Phy configuration from NVM just starts after EECD_AUTO_RD is set.
* Need to wait for Phy configuration completion before accessing
* NVM and Phy.
*/
@ -793,7 +818,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
e1000e_clear_vfta(hw);
/* Setup the receive address. */
/* If, however, a locally administered address was assigned to the
/*
* If, however, a locally administered address was assigned to the
* 82571, we must reserve a RAR for it to work around an issue where
* resetting one port will reload the MAC on the other port.
*/
@ -810,19 +836,19 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
ret_val = e1000_setup_link_82571(hw);
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL);
reg_data = er32(TXDCTL(0));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB |
E1000_TXDCTL_COUNT_DESC;
ew32(TXDCTL, reg_data);
ew32(TXDCTL(0), reg_data);
/* ...for both queues. */
if (mac->type != e1000_82573) {
reg_data = er32(TXDCTL1);
reg_data = er32(TXDCTL(1));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB |
E1000_TXDCTL_COUNT_DESC;
ew32(TXDCTL1, reg_data);
ew32(TXDCTL(1), reg_data);
} else {
e1000e_enable_tx_pkt_filtering(hw);
reg_data = er32(GCR);
@ -830,7 +856,8 @@ static s32 e1000_init_hw_82571(struct e1000_hw *hw)
ew32(GCR, reg_data);
}
/* Clear all of the statistics registers (clear on read). It is
/*
* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
@ -851,17 +878,17 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
u32 reg;
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL);
reg = er32(TXDCTL(0));
reg |= (1 << 22);
ew32(TXDCTL, reg);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL1);
reg = er32(TXDCTL(1));
reg |= (1 << 22);
ew32(TXDCTL1, reg);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
reg = er32(TARC0);
reg = er32(TARC(0));
reg &= ~(0xF << 27); /* 30:27 */
switch (hw->mac.type) {
case e1000_82571:
@ -871,10 +898,10 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
default:
break;
}
ew32(TARC0, reg);
ew32(TARC(0), reg);
/* Transmit Arbitration Control 1 */
reg = er32(TARC1);
reg = er32(TARC(1));
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
@ -884,7 +911,7 @@ static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
reg &= ~(1 << 28);
else
reg |= (1 << 28);
ew32(TARC1, reg);
ew32(TARC(1), reg);
break;
default:
break;
@ -922,7 +949,8 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
if (hw->mac.type == e1000_82573) {
if (hw->mng_cookie.vlan_id != 0) {
/* The VFTA is a 4096b bit-field, each identifying
/*
* The VFTA is a 4096b bit-field, each identifying
* a single VLAN ID. The following operations
* determine which 32b entry (i.e. offset) into the
* array we want to set the VLAN ID (i.e. bit) of
@ -936,7 +964,8 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
}
}
for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
/* If the offset we want to clear is the same offset of the
/*
* If the offset we want to clear is the same offset of the
* manageability VLAN ID, then clear all bits except that of
* the manageability unit.
*/
@ -947,7 +976,7 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
}
/**
* e1000_mc_addr_list_update_82571 - Update Multicast addresses
* e1000_update_mc_addr_list_82571 - Update Multicast addresses
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
@ -959,7 +988,7 @@ void e1000e_clear_vfta(struct e1000_hw *hw)
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this.
**/
static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw,
static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
u8 *mc_addr_list,
u32 mc_addr_count,
u32 rar_used_count,
@ -968,8 +997,8 @@ static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw,
if (e1000e_get_laa_state_82571(hw))
rar_count--;
e1000e_mc_addr_list_update_generic(hw, mc_addr_list, mc_addr_count,
rar_used_count, rar_count);
e1000e_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
rar_used_count, rar_count);
}
/**
@ -984,12 +1013,13 @@ static void e1000_mc_addr_list_update_82571(struct e1000_hw *hw,
**/
static s32 e1000_setup_link_82571(struct e1000_hw *hw)
{
/* 82573 does not have a word in the NVM to determine
/*
* 82573 does not have a word in the NVM to determine
* the default flow control setting, so we explicitly
* set it to full.
*/
if (hw->mac.type == e1000_82573)
hw->mac.fc = e1000_fc_full;
hw->fc.type = e1000_fc_full;
return e1000e_setup_link(hw);
}
@ -1050,14 +1080,14 @@ static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
switch (hw->mac.type) {
case e1000_82571:
case e1000_82572:
/* If SerDes loopback mode is entered, there is no form
/*
* If SerDes loopback mode is entered, there is no form
* of reset to take the adapter out of that mode. So we
* have to explicitly take the adapter out of loopback
* mode. This prevents drivers from twiddling their thumbs
* if another tool failed to take it out of loopback mode.
*/
ew32(SCTL,
E1000_SCTL_DISABLE_SERDES_LOOPBACK);
ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
break;
default:
break;
@ -1124,7 +1154,8 @@ void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
/* If workaround is activated... */
if (state)
/* Hold a copy of the LAA in RAR[14] This is done so that
/*
* Hold a copy of the LAA in RAR[14] This is done so that
* between the time RAR[0] gets clobbered and the time it
* gets fixed, the actual LAA is in one of the RARs and no
* incoming packets directed to this port are dropped.
@ -1152,7 +1183,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
if (nvm->type != e1000_nvm_flash_hw)
return 0;
/* Check bit 4 of word 10h. If it is 0, firmware is done updating
/*
* Check bit 4 of word 10h. If it is 0, firmware is done updating
* 10h-12h. Checksum may need to be fixed.
*/
ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
@ -1160,7 +1192,8 @@ static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
return ret_val;
if (!(data & 0x10)) {
/* Read 0x23 and check bit 15. This bit is a 1
/*
* Read 0x23 and check bit 15. This bit is a 1
* when the checksum has already been fixed. If
* the checksum is still wrong and this bit is a
* 1, we need to return bad checksum. Otherwise,
@ -1240,7 +1273,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
/* .get_link_up_info: media type dependent */
.led_on = e1000e_led_on_generic,
.led_off = e1000e_led_off_generic,
.mc_addr_list_update = e1000_mc_addr_list_update_82571,
.update_mc_addr_list = e1000_update_mc_addr_list_82571,
.reset_hw = e1000_reset_hw_82571,
.init_hw = e1000_init_hw_82571,
.setup_link = e1000_setup_link_82571,
@ -1304,7 +1337,7 @@ struct e1000_info e1000_82571_info = {
| FLAG_TARC_SPEED_MODE_BIT /* errata */
| FLAG_APME_CHECK_PORT_B,
.pba = 38,
.get_invariants = e1000_get_invariants_82571,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_igp,
.nvm_ops = &e82571_nvm_ops,
@ -1322,7 +1355,7 @@ struct e1000_info e1000_82572_info = {
| FLAG_HAS_STATS_ICR_ICT
| FLAG_TARC_SPEED_MODE_BIT, /* errata */
.pba = 38,
.get_invariants = e1000_get_invariants_82571,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_igp,
.nvm_ops = &e82571_nvm_ops,
@ -1342,7 +1375,7 @@ struct e1000_info e1000_82573_info = {
| FLAG_HAS_ERT
| FLAG_HAS_SWSM_ON_LOAD,
.pba = 20,
.get_invariants = e1000_get_invariants_82571,
.get_variants = e1000_get_variants_82571,
.mac_ops = &e82571_mac_ops,
.phy_ops = &e82_phy_ops_m88,
.nvm_ops = &e82571_nvm_ops,


@ -1,7 +1,7 @@
################################################################################
#
# Intel PRO/1000 Linux driver
# Copyright(c) 1999 - 2007 Intel Corporation.
# Copyright(c) 1999 - 2008 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2007 Intel Corporation.
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@ -120,10 +120,10 @@
#define E1000_MANC_ARP_EN 0x00002000 /* Enable ARP Request Filtering */
#define E1000_MANC_RCV_TCO_EN 0x00020000 /* Receive TCO Packets Enabled */
#define E1000_MANC_BLK_PHY_RST_ON_IDE 0x00040000 /* Block phy resets */
#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000 /* Enable MAC address
* filtering */
#define E1000_MANC_EN_MNG2HOST 0x00200000 /* Enable MNG packets to host
* memory */
/* Enable MAC address filtering */
#define E1000_MANC_EN_MAC_ADDR_FILTER 0x00100000
/* Enable MNG packets to host memory */
#define E1000_MANC_EN_MNG2HOST 0x00200000
/* Receive Control */
#define E1000_RCTL_EN 0x00000002 /* enable */
@ -135,25 +135,26 @@
#define E1000_RCTL_LBM_MAC 0x00000040 /* MAC loopback mode */
#define E1000_RCTL_LBM_TCVR 0x000000C0 /* tcvr loopback mode */
#define E1000_RCTL_DTYP_PS 0x00000400 /* Packet Split descriptor */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* rx desc min threshold size */
#define E1000_RCTL_RDMTS_HALF 0x00000000 /* Rx desc min threshold size */
#define E1000_RCTL_MO_SHIFT 12 /* multicast offset shift */
#define E1000_RCTL_BAM 0x00008000 /* broadcast enable */
/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
#define E1000_RCTL_SZ_2048 0x00000000 /* rx buffer size 2048 */
#define E1000_RCTL_SZ_1024 0x00010000 /* rx buffer size 1024 */
#define E1000_RCTL_SZ_512 0x00020000 /* rx buffer size 512 */
#define E1000_RCTL_SZ_256 0x00030000 /* rx buffer size 256 */
#define E1000_RCTL_SZ_2048 0x00000000 /* Rx buffer size 2048 */
#define E1000_RCTL_SZ_1024 0x00010000 /* Rx buffer size 1024 */
#define E1000_RCTL_SZ_512 0x00020000 /* Rx buffer size 512 */
#define E1000_RCTL_SZ_256 0x00030000 /* Rx buffer size 256 */
/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
#define E1000_RCTL_SZ_16384 0x00010000 /* rx buffer size 16384 */
#define E1000_RCTL_SZ_8192 0x00020000 /* rx buffer size 8192 */
#define E1000_RCTL_SZ_4096 0x00030000 /* rx buffer size 4096 */
#define E1000_RCTL_SZ_16384 0x00010000 /* Rx buffer size 16384 */
#define E1000_RCTL_SZ_8192 0x00020000 /* Rx buffer size 8192 */
#define E1000_RCTL_SZ_4096 0x00030000 /* Rx buffer size 4096 */
#define E1000_RCTL_VFE 0x00040000 /* vlan filter enable */
#define E1000_RCTL_CFIEN 0x00080000 /* canonical form enable */
#define E1000_RCTL_CFI 0x00100000 /* canonical form indicator */
#define E1000_RCTL_BSEX 0x02000000 /* Buffer size extension */
#define E1000_RCTL_SECRC 0x04000000 /* Strip Ethernet CRC */
/* Use byte values for the following shift parameters
/*
* Use byte values for the following shift parameters
* Usage:
* psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
* E1000_PSRCTL_BSIZE0_MASK) |
@ -206,7 +207,8 @@
#define E1000_CTRL_VME 0x40000000 /* IEEE VLAN mode enable */
#define E1000_CTRL_PHY_RST 0x80000000 /* PHY Reset */
/* Bit definitions for the Management Data IO (MDIO) and Management Data
/*
* Bit definitions for the Management Data IO (MDIO) and Management Data
* Clock (MDC) pins in the Device Control Register.
*/
@ -279,7 +281,7 @@
#define E1000_TXD_STAT_TC 0x00000004 /* Tx Underrun */
/* Transmit Control */
#define E1000_TCTL_EN 0x00000002 /* enable tx */
#define E1000_TCTL_EN 0x00000002 /* enable Tx */
#define E1000_TCTL_PSP 0x00000008 /* pad short packets */
#define E1000_TCTL_CT 0x00000ff0 /* collision threshold */
#define E1000_TCTL_COLD 0x003ff000 /* collision distance */
@ -337,8 +339,8 @@
#define E1000_KABGTXD_BGSQLBIAS 0x00050000
/* PBA constants */
#define E1000_PBA_8K 0x0008 /* 8KB, default Rx allocation */
#define E1000_PBA_16K 0x0010 /* 16KB, default TX allocation */
#define E1000_PBA_8K 0x0008 /* 8KB */
#define E1000_PBA_16K 0x0010 /* 16KB */
#define E1000_PBS_16K E1000_PBA_16K
@ -356,12 +358,13 @@
/* Interrupt Cause Read */
#define E1000_ICR_TXDW 0x00000001 /* Transmit desc written back */
#define E1000_ICR_LSC 0x00000004 /* Link Status Change */
#define E1000_ICR_RXSEQ 0x00000008 /* rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* rx timer intr (ring 0) */
#define E1000_ICR_RXSEQ 0x00000008 /* Rx sequence error */
#define E1000_ICR_RXDMT0 0x00000010 /* Rx desc min. threshold (0) */
#define E1000_ICR_RXT0 0x00000080 /* Rx timer intr (ring 0) */
#define E1000_ICR_INT_ASSERTED 0x80000000 /* If this bit asserted, the driver should claim the interrupt */
/* This defines the bits that are set in the Interrupt Mask
/*
* This defines the bits that are set in the Interrupt Mask
* Set/Read Register. Each bit is documented below:
* o RXT0 = Receiver Timer Interrupt (ring 0)
* o TXDW = Transmit Descriptor Written Back
@ -379,21 +382,22 @@
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
#define E1000_IMS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* rx timer intr */
#define E1000_IMS_RXSEQ E1000_ICR_RXSEQ /* Rx sequence error */
#define E1000_IMS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
#define E1000_IMS_RXT0 E1000_ICR_RXT0 /* Rx timer intr */
/* Interrupt Cause Set */
#define E1000_ICS_LSC E1000_ICR_LSC /* Link Status Change */
#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* rx desc min. threshold */
#define E1000_ICS_RXDMT0 E1000_ICR_RXDMT0 /* Rx desc min. threshold */
/* Transmit Descriptor Control */
#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
still to be processed. */
/* Enable the counting of desc. still to be processed. */
#define E1000_TXDCTL_COUNT_DESC 0x00400000
/* Flow Control Constants */
#define FLOW_CONTROL_ADDRESS_LOW 0x00C28001
@ -404,7 +408,8 @@
#define E1000_VLAN_FILTER_TBL_SIZE 128 /* VLAN Filter Table (4096 bits) */
/* Receive Address */
/* Number of high/low register pairs in the RAR. The RAR (Receive Address
/*
* Number of high/low register pairs in the RAR. The RAR (Receive Address
* Registers) holds the directed and multicast addresses that we monitor.
* Technically, we have 16 spots. However, we reserve one of these spots
* (RAR[15]) for our directed address used by controllers with
@ -533,8 +538,8 @@
#define E1000_EECD_REQ 0x00000040 /* NVM Access Request */
#define E1000_EECD_GNT 0x00000080 /* NVM Access Grant */
#define E1000_EECD_SIZE 0x00000200 /* NVM Size (0=64 word 1=256 word) */
#define E1000_EECD_ADDR_BITS 0x00000400 /* NVM Addressing bits based on type
* (0-small, 1-large) */
/* NVM Addressing bits based on type (0-small, 1-large) */
#define E1000_EECD_ADDR_BITS 0x00000400
#define E1000_NVM_GRANT_ATTEMPTS 1000 /* NVM # attempts to gain grant */
#define E1000_EECD_AUTO_RD 0x00000200 /* NVM Auto Read done */
#define E1000_EECD_SIZE_EX_MASK 0x00007800 /* NVM Size */
@ -626,7 +631,8 @@
#define MAX_PHY_MULTI_PAGE_REG 0xF
/* Bit definitions for valid PHY IDs. */
/* I = Integrated
/*
* I = Integrated
* E = External
*/
#define M88E1000_E_PHY_ID 0x01410C50
@ -653,37 +659,37 @@
#define M88E1000_PSCR_MDI_MANUAL_MODE 0x0000 /* MDI Crossover Mode bits 6:5 */
/* Manual MDI configuration */
#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020 /* Manual MDIX configuration */
#define M88E1000_PSCR_AUTO_X_1000T 0x0040 /* 1000BASE-T: Auto crossover,
* 100BASE-TX/10BASE-T:
* MDI Mode
*/
#define M88E1000_PSCR_AUTO_X_MODE 0x0060 /* Auto crossover enabled
* all speeds.
*/
/* 1=Enable Extended 10BASE-T distance
* (Lower 10BASE-T RX Threshold)
* 0=Normal 10BASE-T RX Threshold */
/* 1=5-Bit interface in 100BASE-TX
* 0=MII interface in 100BASE-TX */
#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
#define M88E1000_PSCR_AUTO_X_1000T 0x0040
/* Auto crossover enabled all speeds */
#define M88E1000_PSCR_AUTO_X_MODE 0x0060
/*
* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
* 0=Normal 10BASE-T Rx Threshold
*/
#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
/* M88E1000 PHY Specific Status Register */
#define M88E1000_PSSR_REV_POLARITY 0x0002 /* 1=Polarity reversed */
#define M88E1000_PSSR_DOWNSHIFT 0x0020 /* 1=Downshifted */
#define M88E1000_PSSR_MDIX 0x0040 /* 1=MDIX; 0=MDI */
#define M88E1000_PSSR_CABLE_LENGTH 0x0380 /* 0=<50M;1=50-80M;2=80-110M;
* 3=110-140M;4=>140M */
/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */
#define M88E1000_PSSR_CABLE_LENGTH 0x0380
#define M88E1000_PSSR_SPEED 0xC000 /* Speed, bits 14:15 */
#define M88E1000_PSSR_1000MBS 0x8000 /* 10=1000Mbs */
#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
/* Number of times we will attempt to autonegotiate before downshifting if we
* are the master */
/*
* Number of times we will attempt to autonegotiate before downshifting if we
* are the master
*/
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X 0x0000
/* Number of times we will attempt to autonegotiate before downshifting if we
* are the slave */
/*
* Number of times we will attempt to autonegotiate before downshifting if we
* are the slave
*/
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK 0x0300
#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X 0x0100
#define M88E1000_EPSCR_TX_CLK_25 0x0070 /* 25 MHz TX_CLK */
@ -692,7 +698,8 @@
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK 0x0E00
#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X 0x0800
/* Bits...
/*
* Bits...
* 15-5: page
* 4-0: register offset
*/


@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2007 Intel Corporation.
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@ -61,7 +61,7 @@ struct e1000_info;
ndev_printk(KERN_NOTICE , netdev, format, ## arg)
/* TX/RX descriptor defines */
/* Tx/Rx descriptor defines */
#define E1000_DEFAULT_TXD 256
#define E1000_MAX_TXD 4096
#define E1000_MIN_TXD 80
@ -114,13 +114,13 @@ struct e1000_buffer {
dma_addr_t dma;
struct sk_buff *skb;
union {
/* TX */
/* Tx */
struct {
unsigned long time_stamp;
u16 length;
u16 next_to_watch;
};
/* RX */
/* Rx */
/* arrays of page information for packet split */
struct e1000_ps_page *ps_pages;
};
@ -167,9 +167,6 @@ struct e1000_adapter {
spinlock_t tx_queue_lock; /* prevent concurrent tail updates */
/* this is still needed for 82571 and above */
atomic_t irq_sem;
/* track device up/down/testing state */
unsigned long state;
@ -180,7 +177,7 @@ struct e1000_adapter {
u16 rx_itr;
/*
* TX
* Tx
*/
struct e1000_ring *tx_ring /* One per active queue */
____cacheline_aligned_in_smp;
@ -202,7 +199,7 @@ struct e1000_adapter {
unsigned int total_rx_bytes;
unsigned int total_rx_packets;
/* TX stats */
/* Tx stats */
u64 tpt_old;
u64 colc_old;
u64 gotcl_old;
@ -214,7 +211,7 @@ struct e1000_adapter {
u32 tx_dma_failed;
/*
* RX
* Rx
*/
bool (*clean_rx) (struct e1000_adapter *adapter,
int *work_done, int work_to_do)
@ -226,7 +223,7 @@ struct e1000_adapter {
u32 rx_int_delay;
u32 rx_abs_int_delay;
/* RX stats */
/* Rx stats */
u64 hw_csum_err;
u64 hw_csum_good;
u64 rx_hdr_split;
@ -237,6 +234,8 @@ struct e1000_adapter {
unsigned int rx_ps_pages;
u16 rx_ps_bsize0;
u32 max_frame_size;
u32 min_frame_size;
/* OS defined structs */
struct net_device *netdev;
@ -261,7 +260,7 @@ struct e1000_adapter {
u32 wol;
u32 pba;
u8 fc_autoneg;
bool fc_autoneg;
unsigned long led_status;
@ -272,7 +271,7 @@ struct e1000_info {
enum e1000_mac_type mac;
unsigned int flags;
u32 pba;
s32 (*get_invariants)(struct e1000_adapter *);
s32 (*get_variants)(struct e1000_adapter *);
struct e1000_mac_operations *mac_ops;
struct e1000_phy_operations *phy_ops;
struct e1000_nvm_operations *nvm_ops;
@ -308,6 +307,7 @@ struct e1000_info {
#define FLAG_MSI_ENABLED (1 << 27)
#define FLAG_RX_CSUM_ENABLED (1 << 28)
#define FLAG_TSO_FORCE (1 << 29)
#define FLAG_RX_RESTART_NOW (1 << 30)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
@ -357,7 +357,7 @@ extern struct e1000_info e1000_ich8_info;
extern struct e1000_info e1000_ich9_info;
extern struct e1000_info e1000_es2_info;
extern s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num);
extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
extern s32 e1000e_commit_phy(struct e1000_hw *hw);
@ -390,9 +390,11 @@ extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
extern s32 e1000e_setup_link(struct e1000_hw *hw);
extern void e1000e_clear_vfta(struct e1000_hw *hw);
extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
extern void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count);
extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
u8 *mc_addr_list,
u32 mc_addr_count,
u32 rar_used_count,
u32 rar_count);
extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
@ -462,7 +464,6 @@ extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
extern s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
extern void e1000e_release_nvm(struct e1000_hw *hw);


@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2007 Intel Corporation.
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@ -92,7 +92,8 @@
/* In-Band Control Register (Page 194, Register 18) */
#define GG82563_ICR_DIS_PADDING 0x0010 /* Disable Padding */
/* A table for the GG82563 cable length where the range is defined
/*
* A table for the GG82563 cable length where the range is defined
* with a lower bound at "index" and the upper bound at
* "index + 5".
*/
@ -118,7 +119,7 @@ static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
struct e1000_phy_info *phy = &hw->phy;
s32 ret_val;
if (hw->media_type != e1000_media_type_copper) {
if (hw->phy.media_type != e1000_media_type_copper) {
phy->type = e1000_phy_none;
return 0;
}
@ -167,15 +168,20 @@ static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
break;
}
nvm->type = e1000_nvm_eeprom_spi;
nvm->type = e1000_nvm_eeprom_spi;
size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
E1000_EECD_SIZE_EX_SHIFT);
/* Added to a constant, "size" becomes the left-shift value
/*
* Added to a constant, "size" becomes the left-shift value
* for setting word_size.
*/
size += NVM_WORD_SIZE_BASE_SHIFT;
/* EEPROM access above 16k is unsupported */
if (size > 14)
size = 14;
nvm->word_size = 1 << size;
return 0;
@ -196,10 +202,10 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
/* Set media type */
switch (adapter->pdev->device) {
case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
hw->media_type = e1000_media_type_internal_serdes;
hw->phy.media_type = e1000_media_type_internal_serdes;
break;
default:
hw->media_type = e1000_media_type_copper;
hw->phy.media_type = e1000_media_type_copper;
break;
}
@ -208,11 +214,10 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
/* Set rar entry count */
mac->rar_entry_count = E1000_RAR_ENTRIES;
/* Set if manageability features are enabled. */
mac->arc_subsystem_valid =
(er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
mac->arc_subsystem_valid = (er32(FWSM) & E1000_FWSM_MODE_MASK) ? 1 : 0;
/* check for link */
switch (hw->media_type) {
switch (hw->phy.media_type) {
case e1000_media_type_copper:
func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
func->check_for_link = e1000e_check_for_copper_link;
@ -233,7 +238,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
return 0;
}
static s32 e1000_get_invariants_80003es2lan(struct e1000_adapter *adapter)
static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
s32 rc;
@ -344,8 +349,10 @@ static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
if (!(swfw_sync & (fwmask | swmask)))
break;
/* Firmware currently using resource (fwmask)
* or other software thread using resource (swmask) */
/*
* Firmware currently using resource (fwmask)
* or other software thread using resource (swmask)
*/
e1000e_put_hw_semaphore(hw);
mdelay(5);
i++;
@ -407,7 +414,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG)
page_select = GG82563_PHY_PAGE_SELECT;
else
/* Use Alternative Page Select register to access
/*
* Use Alternative Page Select register to access
* registers 30 and 31
*/
page_select = GG82563_PHY_PAGE_SELECT_ALT;
@ -417,7 +425,8 @@ static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
if (ret_val)
return ret_val;
/* The "ready" bit in the MDIC register may be incorrectly set
/*
* The "ready" bit in the MDIC register may be incorrectly set
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
@ -462,7 +471,8 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG)
page_select = GG82563_PHY_PAGE_SELECT;
else
/* Use Alternative Page Select register to access
/*
* Use Alternative Page Select register to access
* registers 30 and 31
*/
page_select = GG82563_PHY_PAGE_SELECT_ALT;
@ -473,7 +483,8 @@ static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
return ret_val;
/* The "ready" bit in the MDIC register may be incorrectly set
/*
* The "ready" bit in the MDIC register may be incorrectly set
* before the device has completed the "Page Select" MDI
* transaction. So we wait 200us after each MDI command...
*/
@ -554,7 +565,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
u16 phy_data;
bool link;
/* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
/*
* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@ -583,7 +595,7 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
udelay(1);
if (hw->phy.wait_for_link) {
if (hw->phy.autoneg_wait_to_complete) {
hw_dbg(hw, "Waiting for forced speed/duplex link "
"on GG82563 phy.\n");
@ -593,7 +605,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
return ret_val;
if (!link) {
/* We didn't get link.
/*
* We didn't get link.
* Reset the DSP and cross our fingers.
*/
ret_val = e1000e_phy_reset_dsp(hw);
@ -612,7 +625,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* Resetting the phy means we need to verify the TX_CLK corresponds
/*
* Resetting the phy means we need to verify the TX_CLK corresponds
* to the link speed. 10Mbps -> 2.5MHz, else 25MHz.
*/
phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
@ -621,7 +635,8 @@ static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
else
phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
/* In addition, we must re-enable CRS on Tx for both half and full
/*
* In addition, we must re-enable CRS on Tx for both half and full
* duplex.
*/
phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
@ -671,7 +686,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
{
s32 ret_val;
if (hw->media_type == e1000_media_type_copper) {
if (hw->phy.media_type == e1000_media_type_copper) {
ret_val = e1000e_get_speed_and_duplex_copper(hw,
speed,
duplex);
@ -704,7 +719,8 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
u32 icr;
s32 ret_val;
/* Prevent the PCI-E bus from sticking if there is no TLP connection
/*
* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
@ -776,16 +792,16 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
ret_val = e1000e_setup_link(hw);
/* Set the transmit descriptor write-back policy */
reg_data = er32(TXDCTL);
reg_data = er32(TXDCTL(0));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
ew32(TXDCTL, reg_data);
ew32(TXDCTL(0), reg_data);
/* ...for both queues. */
reg_data = er32(TXDCTL1);
reg_data = er32(TXDCTL(1));
reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
ew32(TXDCTL1, reg_data);
ew32(TXDCTL(1), reg_data);
/* Enable retransmit on late collisions */
reg_data = er32(TCTL);
@ -808,7 +824,8 @@ static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
reg_data &= ~0x00100000;
E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
/* Clear all of the statistics registers (clear on read). It is
/*
* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
@ -829,29 +846,29 @@ static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
u32 reg;
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL);
reg = er32(TXDCTL(0));
reg |= (1 << 22);
ew32(TXDCTL, reg);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL1);
reg = er32(TXDCTL(1));
reg |= (1 << 22);
ew32(TXDCTL1, reg);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
reg = er32(TARC0);
reg = er32(TARC(0));
reg &= ~(0xF << 27); /* 30:27 */
if (hw->media_type != e1000_media_type_copper)
if (hw->phy.media_type != e1000_media_type_copper)
reg &= ~(1 << 20);
ew32(TARC0, reg);
ew32(TARC(0), reg);
/* Transmit Arbitration Control 1 */
reg = er32(TARC1);
reg = er32(TARC(1));
if (er32(TCTL) & E1000_TCTL_MULR)
reg &= ~(1 << 28);
else
reg |= (1 << 28);
ew32(TARC1, reg);
ew32(TARC(1), reg);
}
/**
@ -881,7 +898,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* Options:
/*
* Options:
* MDI/MDI-X = 0 (default)
* 0 - Auto for all speeds
* 1 - MDI mode
@ -907,7 +925,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
break;
}
/* Options:
/*
* Options:
* disable_polarity_correction = 0 (default)
* Automatic Correction for Reversed Cable Polarity
* 0 - Disabled
@ -928,10 +947,9 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
return ret_val;
}
/* Bypass RX and TX FIFO's */
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
/* Bypass Rx and Tx FIFO's */
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
if (ret_val)
return ret_val;
@ -953,7 +971,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* Do not init these registers when the HW is in IAMT mode, since the
/*
* Do not init these registers when the HW is in IAMT mode, since the
* firmware will have already initialized them. We only initialize
* them if the HW is not in IAMT mode.
*/
@ -974,7 +993,8 @@ static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
return ret_val;
}
/* Workaround: Disable padding in Kumeran interface in the MAC
/*
* Workaround: Disable padding in Kumeran interface in the MAC
* and in the PHY to avoid CRC errors.
*/
ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
@ -1007,9 +1027,11 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ew32(CTRL, ctrl);
/* Set the mac to wait the maximum time between each
/*
* Set the mac to wait the maximum time between each
* iteration and increase the max iterations when
* polling the phy; this fixes erroneous timeouts at 10Mbps. */
* polling the phy; this fixes erroneous timeouts at 10Mbps.
*/
ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
if (ret_val)
return ret_val;
@ -1026,9 +1048,8 @@ static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
reg_data);
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
reg_data);
if (ret_val)
return ret_val;
@ -1056,9 +1077,8 @@ static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
u16 reg_data;
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
reg_data);
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
reg_data);
if (ret_val)
return ret_val;
@ -1096,9 +1116,8 @@ static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
u32 tipg;
reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
ret_val = e1000e_write_kmrn_reg(hw,
E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
reg_data);
ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
reg_data);
if (ret_val)
return ret_val;
@ -1175,7 +1194,7 @@ static struct e1000_mac_operations es2_mac_ops = {
.get_link_up_info = e1000_get_link_up_info_80003es2lan,
.led_on = e1000e_led_on_generic,
.led_off = e1000e_led_off_generic,
.mc_addr_list_update = e1000e_mc_addr_list_update_generic,
.update_mc_addr_list = e1000e_update_mc_addr_list_generic,
.reset_hw = e1000_reset_hw_80003es2lan,
.init_hw = e1000_init_hw_80003es2lan,
.setup_link = e1000e_setup_link,
@ -1224,7 +1243,7 @@ struct e1000_info e1000_es2_info = {
| FLAG_DISABLE_FC_PAUSE_TIME /* errata */
| FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
.pba = 38,
.get_invariants = e1000_get_invariants_80003es2lan,
.get_variants = e1000_get_variants_80003es2lan,
.mac_ops = &es2_mac_ops,
.phy_ops = &es2_phy_ops,
.nvm_ops = &es2_nvm_ops,


@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2007 Intel Corporation.
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@ -102,7 +102,7 @@ static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
"Interrupt test (offline)", "Loopback test (offline)",
"Link test (on/offline)"
};
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
#define E1000_TEST_LEN ARRAY_SIZE(e1000_gstrings_test)
static int e1000_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
@ -111,7 +111,7 @@ static int e1000_get_settings(struct net_device *netdev,
struct e1000_hw *hw = &adapter->hw;
u32 status;
if (hw->media_type == e1000_media_type_copper) {
if (hw->phy.media_type == e1000_media_type_copper) {
ecmd->supported = (SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
@ -165,7 +165,7 @@ static int e1000_get_settings(struct net_device *netdev,
ecmd->duplex = -1;
}
ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
return 0;
}
@ -187,7 +187,7 @@ static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
mac->autoneg = 0;
/* Fiber NICs only allow 1000 gbps Full duplex */
if ((adapter->hw.media_type == e1000_media_type_fiber) &&
if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
spddplx != (SPEED_1000 + DUPLEX_FULL)) {
ndev_err(adapter->netdev, "Unsupported Speed/Duplex "
"configuration\n");
@ -226,8 +226,10 @@ static int e1000_set_settings(struct net_device *netdev,
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
/* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed */
/*
* When SoL/IDER sessions are active, autoneg/speed/duplex
* cannot be changed
*/
if (e1000_check_reset_block(hw)) {
ndev_err(netdev, "Cannot change link "
"characteristics when SoL/IDER is active.\n");
@ -239,7 +241,7 @@ static int e1000_set_settings(struct net_device *netdev,
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
if (hw->media_type == e1000_media_type_fiber)
if (hw->phy.media_type == e1000_media_type_fiber)
hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
ADVERTISED_FIBRE |
ADVERTISED_Autoneg;
@ -248,6 +250,8 @@ static int e1000_set_settings(struct net_device *netdev,
ADVERTISED_TP |
ADVERTISED_Autoneg;
ecmd->advertising = hw->phy.autoneg_advertised;
if (adapter->fc_autoneg)
hw->fc.original_type = e1000_fc_default;
} else {
if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
clear_bit(__E1000_RESETTING, &adapter->state);
@ -277,11 +281,11 @@ static void e1000_get_pauseparam(struct net_device *netdev,
pause->autoneg =
(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
if (hw->mac.fc == e1000_fc_rx_pause) {
if (hw->fc.type == e1000_fc_rx_pause) {
pause->rx_pause = 1;
} else if (hw->mac.fc == e1000_fc_tx_pause) {
} else if (hw->fc.type == e1000_fc_tx_pause) {
pause->tx_pause = 1;
} else if (hw->mac.fc == e1000_fc_full) {
} else if (hw->fc.type == e1000_fc_full) {
pause->rx_pause = 1;
pause->tx_pause = 1;
}
@ -300,18 +304,18 @@ static int e1000_set_pauseparam(struct net_device *netdev,
msleep(1);
if (pause->rx_pause && pause->tx_pause)
hw->mac.fc = e1000_fc_full;
hw->fc.type = e1000_fc_full;
else if (pause->rx_pause && !pause->tx_pause)
hw->mac.fc = e1000_fc_rx_pause;
hw->fc.type = e1000_fc_rx_pause;
else if (!pause->rx_pause && pause->tx_pause)
hw->mac.fc = e1000_fc_tx_pause;
hw->fc.type = e1000_fc_tx_pause;
else if (!pause->rx_pause && !pause->tx_pause)
hw->mac.fc = e1000_fc_none;
hw->fc.type = e1000_fc_none;
hw->mac.original_fc = hw->mac.fc;
hw->fc.original_type = hw->fc.type;
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->mac.fc = e1000_fc_default;
hw->fc.type = e1000_fc_default;
if (netif_running(adapter->netdev)) {
e1000e_down(adapter);
e1000e_up(adapter);
@ -319,7 +323,7 @@ static int e1000_set_pauseparam(struct net_device *netdev,
e1000e_reset(adapter);
}
} else {
retval = ((hw->media_type == e1000_media_type_fiber) ?
retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw));
}
@ -558,8 +562,10 @@ static int e1000_set_eeprom(struct net_device *netdev,
ret_val = e1000_write_nvm(hw, first_word,
last_word - first_word + 1, eeprom_buff);
/* Update the checksum over the first part of the EEPROM if needed
* and flush shadow RAM for 82573 controllers */
/*
* Update the checksum over the first part of the EEPROM if needed
* and flush shadow RAM for 82573 controllers
*/
if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) ||
(hw->mac.type == e1000_82573)))
e1000e_update_nvm_checksum(hw);
@ -578,8 +584,10 @@ static void e1000_get_drvinfo(struct net_device *netdev,
strncpy(drvinfo->driver, e1000e_driver_name, 32);
strncpy(drvinfo->version, e1000e_driver_version, 32);
/* EEPROM image version # is reported as firmware version # for
* PCI-E controllers */
/*
* EEPROM image version # is reported as firmware version # for
* PCI-E controllers
*/
e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data);
sprintf(firmware_version, "%d.%d-%d",
(eeprom_data & 0xF000) >> 12,
@ -633,10 +641,17 @@ static int e1000_set_ringparam(struct net_device *netdev,
tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
if (!tx_ring)
goto err_alloc_tx;
/*
* use a memcpy to save any previously configured
* items like napi structs from having to be
* reinitialized
*/
memcpy(tx_ring, tx_old, sizeof(struct e1000_ring));
rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
if (!rx_ring)
goto err_alloc_rx;
memcpy(rx_ring, rx_old, sizeof(struct e1000_ring));
adapter->tx_ring = tx_ring;
adapter->rx_ring = rx_ring;
@ -658,8 +673,10 @@ static int e1000_set_ringparam(struct net_device *netdev,
if (err)
goto err_setup_tx;
/* save the new, restore the old in order to free it,
* then restore the new back again */
/*
* restore the old in order to free it,
* then add in the new
*/
adapter->rx_ring = rx_old;
adapter->tx_ring = tx_old;
e1000e_free_rx_resources(adapter);
@ -690,61 +707,55 @@ err_setup:
return err;
}
static bool reg_pattern_test_array(struct e1000_adapter *adapter, u64 *data,
int reg, int offset, u32 mask, u32 write)
static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
int reg, int offset, u32 mask, u32 write)
{
int i;
u32 read;
u32 pat, val;
static const u32 test[] =
{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
for (i = 0; i < ARRAY_SIZE(test); i++) {
for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
(test[i] & write));
read = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
if (read != (test[i] & write & mask)) {
(test[pat] & write));
val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
if (val != (test[pat] & write & mask)) {
ndev_err(adapter->netdev, "pattern test reg %04X "
"failed: got 0x%08X expected 0x%08X\n",
reg + offset,
read, (test[i] & write & mask));
val, (test[pat] & write & mask));
*data = reg;
return true;
return 1;
}
}
return false;
return 0;
}
static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
u32 read;
u32 val;
__ew32(&adapter->hw, reg, write & mask);
read = __er32(&adapter->hw, reg);
if ((write & mask) != (read & mask)) {
val = __er32(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
ndev_err(adapter->netdev, "set/check reg %04X test failed: "
"got 0x%08X expected 0x%08X\n", reg, (read & mask),
"got 0x%08X expected 0x%08X\n", reg, (val & mask),
(write & mask));
*data = reg;
return true;
return 1;
}
return false;
return 0;
}
#define REG_PATTERN_TEST(R, M, W) \
do { \
if (reg_pattern_test_array(adapter, data, R, 0, M, W)) \
return 1; \
#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
do { \
if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
return 1; \
} while (0)
#define REG_PATTERN_TEST(reg, mask, write) \
REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)
#define REG_PATTERN_TEST_ARRAY(R, offset, M, W) \
do { \
if (reg_pattern_test_array(adapter, data, R, offset, M, W)) \
return 1; \
} while (0)
#define REG_SET_AND_CHECK(R, M, W) \
do { \
if (reg_set_and_check(adapter, data, R, M, W)) \
return 1; \
#define REG_SET_AND_CHECK(reg, mask, write) \
do { \
if (reg_set_and_check(adapter, data, reg, mask, write)) \
return 1; \
} while (0)
static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
@ -758,7 +769,8 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
u32 i;
u32 toggle;
/* The status register is Read Only, so a write should fail.
/*
* The status register is Read Only, so a write should fail.
* Some bits that get toggled are ignored.
*/
switch (mac->type) {
@ -908,7 +920,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
mask = 1 << i;
if (!shared_int) {
/* Disable the interrupt to be reported in
/*
* Disable the interrupt to be reported in
* the cause register and then force the same
* interrupt and see if one gets posted. If
* an interrupt was posted to the bus, the
@ -925,7 +938,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
}
}
/* Enable the interrupt to be reported in
/*
* Enable the interrupt to be reported in
* the cause register and then force the same
* interrupt and see if one gets posted. If
* an interrupt was not posted to the bus, the
@ -942,7 +956,8 @@ static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
}
if (!shared_int) {
/* Disable the other interrupts to be reported in
/*
* Disable the other interrupts to be reported in
* the cause register and then force the other
* interrupts and see if any get posted. If
* an interrupt was posted to the bus, the
@ -1024,7 +1039,6 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct e1000_hw *hw = &adapter->hw;
u32 rctl;
int size;
int i;
int ret_val;
@ -1033,13 +1047,13 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
if (!tx_ring->count)
tx_ring->count = E1000_DEFAULT_TXD;
size = tx_ring->count * sizeof(struct e1000_buffer);
tx_ring->buffer_info = kmalloc(size, GFP_KERNEL);
if (!tx_ring->buffer_info) {
tx_ring->buffer_info = kcalloc(tx_ring->count,
sizeof(struct e1000_buffer),
GFP_KERNEL);
if (!(tx_ring->buffer_info)) {
ret_val = 1;
goto err_nomem;
}
memset(tx_ring->buffer_info, 0, size);
tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
tx_ring->size = ALIGN(tx_ring->size, 4096);
@ -1049,21 +1063,17 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
ret_val = 2;
goto err_nomem;
}
memset(tx_ring->desc, 0, tx_ring->size);
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
ew32(TDBAL,
((u64) tx_ring->dma & 0x00000000FFFFFFFF));
ew32(TDBAL, ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
ew32(TDBAH, ((u64) tx_ring->dma >> 32));
ew32(TDLEN,
tx_ring->count * sizeof(struct e1000_tx_desc));
ew32(TDLEN, tx_ring->count * sizeof(struct e1000_tx_desc));
ew32(TDH, 0);
ew32(TDT, 0);
ew32(TCTL,
E1000_TCTL_PSP | E1000_TCTL_EN |
E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
ew32(TCTL, E1000_TCTL_PSP | E1000_TCTL_EN | E1000_TCTL_MULR |
E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
for (i = 0; i < tx_ring->count; i++) {
struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
@ -1085,12 +1095,11 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
ret_val = 4;
goto err_nomem;
}
tx_desc->buffer_addr = cpu_to_le64(
tx_ring->buffer_info[i].dma);
tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
tx_desc->lower.data = cpu_to_le32(skb->len);
tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
E1000_TXD_CMD_IFCS |
E1000_TXD_CMD_RPS);
E1000_TXD_CMD_RS);
tx_desc->upper.data = 0;
}
@ -1099,13 +1108,13 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
if (!rx_ring->count)
rx_ring->count = E1000_DEFAULT_RXD;
size = rx_ring->count * sizeof(struct e1000_buffer);
rx_ring->buffer_info = kmalloc(size, GFP_KERNEL);
if (!rx_ring->buffer_info) {
rx_ring->buffer_info = kcalloc(rx_ring->count,
sizeof(struct e1000_buffer),
GFP_KERNEL);
if (!(rx_ring->buffer_info)) {
ret_val = 5;
goto err_nomem;
}
memset(rx_ring->buffer_info, 0, size);
rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
@ -1114,7 +1123,6 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
ret_val = 6;
goto err_nomem;
}
memset(rx_ring->desc, 0, rx_ring->size);
rx_ring->next_to_use = 0;
rx_ring->next_to_clean = 0;
@ -1126,6 +1134,8 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
ew32(RDH, 0);
ew32(RDT, 0);
rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_LPE |
E1000_RCTL_SBP | E1000_RCTL_SECRC |
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
ew32(RCTL, rctl);
@ -1175,21 +1185,22 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
u32 ctrl_reg = 0;
u32 stat_reg = 0;
adapter->hw.mac.autoneg = 0;
hw->mac.autoneg = 0;
if (adapter->hw.phy.type == e1000_phy_m88) {
if (hw->phy.type == e1000_phy_m88) {
/* Auto-MDI/MDIX Off */
e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, 0x0808);
/* reset to update Auto-MDI/MDIX */
e1e_wphy(hw, PHY_CONTROL, 0x9140);
/* autoneg off */
e1e_wphy(hw, PHY_CONTROL, 0x8140);
} else if (adapter->hw.phy.type == e1000_phy_gg82563)
} else if (hw->phy.type == e1000_phy_gg82563)
e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, 0x1CC);
ctrl_reg = er32(CTRL);
if (adapter->hw.phy.type == e1000_phy_ife) {
switch (hw->phy.type) {
case e1000_phy_ife:
/* force 100, set loopback */
e1e_wphy(hw, PHY_CONTROL, 0x6100);
@ -1199,9 +1210,11 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
E1000_CTRL_SPD_100 |/* Force Speed to 100 */
E1000_CTRL_FD); /* Force Duplex to FULL */
} else {
break;
default:
/* force 1000, set loopback */
e1e_wphy(hw, PHY_CONTROL, 0x4140);
mdelay(250);
/* Now set up the MAC to the same speed/duplex as the PHY. */
ctrl_reg = er32(CTRL);
@ -1210,14 +1223,20 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
E1000_CTRL_FD); /* Force Duplex to FULL */
if ((adapter->hw.mac.type == e1000_ich8lan) ||
(adapter->hw.mac.type == e1000_ich9lan))
ctrl_reg |= E1000_CTRL_SLU; /* Set Link Up */
}
if (adapter->hw.media_type == e1000_media_type_copper &&
adapter->hw.phy.type == e1000_phy_m88) {
if (hw->phy.media_type == e1000_media_type_copper &&
hw->phy.type == e1000_phy_m88) {
ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
} else {
/* Set the ILOS bit on the fiber Nic if half duplex link is
* detected. */
/*
* Set the ILOS bit on the fiber Nic if half duplex link is
* detected.
*/
stat_reg = er32(STATUS);
if ((stat_reg & E1000_STATUS_FD) == 0)
ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
@ -1225,10 +1244,11 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
ew32(CTRL, ctrl_reg);
/* Disable the receiver on the PHY so when a cable is plugged in, the
/*
* Disable the receiver on the PHY so when a cable is plugged in, the
* PHY does not begin to autoneg when a cable is reconnected to the NIC.
*/
if (adapter->hw.phy.type == e1000_phy_m88)
if (hw->phy.type == e1000_phy_m88)
e1000_phy_disable_receiver(adapter);
udelay(500);
@ -1244,8 +1264,10 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
/* special requirements for 82571/82572 fiber adapters */
/* jump through hoops to make sure link is up because serdes
* link is hardwired up */
/*
* jump through hoops to make sure link is up because serdes
* link is hardwired up
*/
ctrl |= E1000_CTRL_SLU;
ew32(CTRL, ctrl);
@ -1263,8 +1285,10 @@ static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
ew32(CTRL, ctrl);
}
/* special write to serdes control register to enable SerDes analog
* loopback */
/*
* special write to serdes control register to enable SerDes analog
* loopback
*/
#define E1000_SERDES_LB_ON 0x410
ew32(SCTL, E1000_SERDES_LB_ON);
msleep(10);
@ -1279,8 +1303,10 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
u32 ctrlext = er32(CTRL_EXT);
u32 ctrl = er32(CTRL);
/* save CTRL_EXT to restore later, reuse an empty variable (unused
on mac_type 80003es2lan) */
/*
* save CTRL_EXT to restore later, reuse an empty variable (unused
* on mac_type 80003es2lan)
*/
adapter->tx_fifo_head = ctrlext;
/* clear the serdes mode bits, putting the device into mac loopback */
@ -1302,7 +1328,7 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
#define KMRNCTRLSTA_OPMODE (0x1F << 16)
#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
ew32(KMRNCTRLSTA,
(KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));
(KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));
return 0;
}
@ -1312,8 +1338,8 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
struct e1000_hw *hw = &adapter->hw;
u32 rctl;
if (hw->media_type == e1000_media_type_fiber ||
hw->media_type == e1000_media_type_internal_serdes) {
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
switch (hw->mac.type) {
case e1000_80003es2lan:
return e1000_set_es2lan_mac_loopback(adapter);
@ -1328,7 +1354,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
ew32(RCTL, rctl);
return 0;
}
} else if (hw->media_type == e1000_media_type_copper) {
} else if (hw->phy.media_type == e1000_media_type_copper) {
return e1000_integrated_phy_loopback(adapter);
}
@ -1347,18 +1373,17 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
switch (hw->mac.type) {
case e1000_80003es2lan:
if (hw->media_type == e1000_media_type_fiber ||
hw->media_type == e1000_media_type_internal_serdes) {
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
/* restore CTRL_EXT, stealing space from tx_fifo_head */
ew32(CTRL_EXT,
adapter->tx_fifo_head);
ew32(CTRL_EXT, adapter->tx_fifo_head);
adapter->tx_fifo_head = 0;
}
/* fall through */
case e1000_82571:
case e1000_82572:
if (hw->media_type == e1000_media_type_fiber ||
hw->media_type == e1000_media_type_internal_serdes) {
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
#define E1000_SERDES_LB_OFF 0x400
ew32(SCTL, E1000_SERDES_LB_OFF);
msleep(10);
@ -1414,7 +1439,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ew32(RDT, rx_ring->count - 1);
/* Calculate the loop count based on the largest descriptor ring
/*
* Calculate the loop count based on the largest descriptor ring
* The idea is to wrap the largest ring a number of times using 64
* send/receive pairs during each loop
*/
@ -1428,8 +1454,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
l = 0;
for (j = 0; j <= lc; j++) { /* loop count loop */
for (i = 0; i < 64; i++) { /* send the packets */
e1000_create_lbtest_frame(
tx_ring->buffer_info[i].skb, 1024);
e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1024);
pci_dma_sync_single_for_device(pdev,
tx_ring->buffer_info[k].dma,
tx_ring->buffer_info[k].length,
@ -1454,7 +1480,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
l++;
if (l == rx_ring->count)
l = 0;
/* time + 20 msecs (200 msecs on 2.4) is more than
/*
* time + 20 msecs (200 msecs on 2.4) is more than
* enough time to complete the receives, if it's
* exceeded, break and error off
*/
@ -1463,7 +1490,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
ret_val = 13; /* ret_val is the same as mis-compare */
break;
}
if (jiffies >= (time + 2)) {
if (jiffies >= (time + 20)) {
ret_val = 14; /* error code for time out error */
break;
}
@ -1473,8 +1500,10 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
{
/* PHY loopback cannot be performed if SoL/IDER
* sessions are active */
/*
* PHY loopback cannot be performed if SoL/IDER
* sessions are active
*/
if (e1000_check_reset_block(&adapter->hw)) {
ndev_err(adapter->netdev, "Cannot do PHY loopback test "
"when SoL/IDER is active.\n");
@ -1504,12 +1533,14 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
struct e1000_hw *hw = &adapter->hw;
*data = 0;
if (hw->media_type == e1000_media_type_internal_serdes) {
if (hw->phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
hw->mac.serdes_has_link = 0;
/* On some blade server designs, link establishment
* could take as long as 2-3 minutes */
/*
* On some blade server designs, link establishment
* could take as long as 2-3 minutes
*/
do {
hw->mac.ops.check_for_link(hw);
if (hw->mac.serdes_has_link)
@ -1562,8 +1593,10 @@ static void e1000_diag_test(struct net_device *netdev,
ndev_info(netdev, "offline testing starting\n");
/* Link test performed before hardware reset so autoneg doesn't
* interfere with test result */
/*
* Link test performed before hardware reset so autoneg doesn't
* interfere with test result
*/
if (e1000_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
@ -1596,9 +1629,9 @@ static void e1000_diag_test(struct net_device *netdev,
adapter->hw.mac.autoneg = autoneg;
/* force this routine to wait until autoneg complete/timeout */
adapter->hw.phy.wait_for_link = 1;
adapter->hw.phy.autoneg_wait_to_complete = 1;
e1000e_reset(adapter);
adapter->hw.phy.wait_for_link = 0;
adapter->hw.phy.autoneg_wait_to_complete = 0;
clear_bit(__E1000_TESTING, &adapter->state);
if (if_running)
@ -1768,8 +1801,7 @@ static void e1000_get_strings(struct net_device *netdev, u32 stringset,
switch (stringset) {
case ETH_SS_TEST:
memcpy(data, *e1000_gstrings_test,
sizeof(e1000_gstrings_test));
memcpy(data, *e1000_gstrings_test, sizeof(e1000_gstrings_test));
break;
case ETH_SS_STATS:
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {


@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2007 Intel Corporation.
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@ -66,14 +66,14 @@ enum e1e_registers {
E1000_IMS = 0x000D0, /* Interrupt Mask Set - RW */
E1000_IMC = 0x000D8, /* Interrupt Mask Clear - WO */
E1000_IAM = 0x000E0, /* Interrupt Acknowledge Auto Mask */
E1000_RCTL = 0x00100, /* RX Control - RW */
E1000_RCTL = 0x00100, /* Rx Control - RW */
E1000_FCTTV = 0x00170, /* Flow Control Transmit Timer Value - RW */
E1000_TXCW = 0x00178, /* TX Configuration Word - RW */
E1000_RXCW = 0x00180, /* RX Configuration Word - RO */
E1000_TCTL = 0x00400, /* TX Control - RW */
E1000_TCTL_EXT = 0x00404, /* Extended TX Control - RW */
E1000_TIPG = 0x00410, /* TX Inter-packet gap -RW */
E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle - RW */
E1000_TXCW = 0x00178, /* Tx Configuration Word - RW */
E1000_RXCW = 0x00180, /* Rx Configuration Word - RO */
E1000_TCTL = 0x00400, /* Tx Control - RW */
E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */
E1000_TIPG = 0x00410, /* Tx Inter-packet gap -RW */
E1000_AIT = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */
E1000_LEDCTL = 0x00E00, /* LED Control - RW */
E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
@ -87,12 +87,14 @@ enum e1e_registers {
E1000_FCRTL = 0x02160, /* Flow Control Receive Threshold Low - RW */
E1000_FCRTH = 0x02168, /* Flow Control Receive Threshold High - RW */
E1000_PSRCTL = 0x02170, /* Packet Split Receive Control - RW */
E1000_RDBAL = 0x02800, /* RX Descriptor Base Address Low - RW */
E1000_RDBAH = 0x02804, /* RX Descriptor Base Address High - RW */
E1000_RDLEN = 0x02808, /* RX Descriptor Length - RW */
E1000_RDH = 0x02810, /* RX Descriptor Head - RW */
E1000_RDT = 0x02818, /* RX Descriptor Tail - RW */
E1000_RDTR = 0x02820, /* RX Delay Timer - RW */
E1000_RDBAL = 0x02800, /* Rx Descriptor Base Address Low - RW */
E1000_RDBAH = 0x02804, /* Rx Descriptor Base Address High - RW */
E1000_RDLEN = 0x02808, /* Rx Descriptor Length - RW */
E1000_RDH = 0x02810, /* Rx Descriptor Head - RW */
E1000_RDT = 0x02818, /* Rx Descriptor Tail - RW */
E1000_RDTR = 0x02820, /* Rx Delay Timer - RW */
E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
#define E1000_RXDCTL(_n) (E1000_RXDCTL_BASE + (_n << 8))
E1000_RADV = 0x0282C, /* RX Interrupt Absolute Delay Timer - RW */
/* Convenience macros
@ -105,17 +107,17 @@ enum e1e_registers {
*/
#define E1000_RDBAL_REG(_n) (E1000_RDBAL + (_n << 8))
E1000_KABGTXD = 0x03004, /* AFE Band Gap Transmit Ref Data */
E1000_TDBAL = 0x03800, /* TX Descriptor Base Address Low - RW */
E1000_TDBAH = 0x03804, /* TX Descriptor Base Address High - RW */
E1000_TDLEN = 0x03808, /* TX Descriptor Length - RW */
E1000_TDH = 0x03810, /* TX Descriptor Head - RW */
E1000_TDT = 0x03818, /* TX Descriptor Tail - RW */
E1000_TIDV = 0x03820, /* TX Interrupt Delay Value - RW */
E1000_TXDCTL = 0x03828, /* TX Descriptor Control - RW */
E1000_TADV = 0x0382C, /* TX Interrupt Absolute Delay Val - RW */
E1000_TARC0 = 0x03840, /* TX Arbitration Count (0) */
E1000_TXDCTL1 = 0x03928, /* TX Descriptor Control (1) - RW */
E1000_TARC1 = 0x03940, /* TX Arbitration Count (1) */
E1000_TDBAL = 0x03800, /* Tx Descriptor Base Address Low - RW */
E1000_TDBAH = 0x03804, /* Tx Descriptor Base Address High - RW */
E1000_TDLEN = 0x03808, /* Tx Descriptor Length - RW */
E1000_TDH = 0x03810, /* Tx Descriptor Head - RW */
E1000_TDT = 0x03818, /* Tx Descriptor Tail - RW */
E1000_TIDV = 0x03820, /* Tx Interrupt Delay Value - RW */
E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
#define E1000_TXDCTL(_n) (E1000_TXDCTL_BASE + (_n << 8))
E1000_TADV = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */
#define E1000_TARC(_n) (E1000_TARC_BASE + (_n << 8))
E1000_CRCERRS = 0x04000, /* CRC Error Count - R/clr */
E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
E1000_SYMERRS = 0x04008, /* Symbol Error Count - R/clr */
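A short aside on the per-queue macros introduced above: the (_n << 8) stride makes E1000_TXDCTL(1) and E1000_TARC(1) land exactly on the 0x03928 and 0x03940 addresses that the removed fixed names E1000_TXDCTL1 and E1000_TARC1 used. An illustrative check, not part of the commit:

/* Illustrative only: the new per-queue stride reproduces the old names. */
#define EXAMPLE_TXDCTL_BASE	0x03828
#define EXAMPLE_TARC_BASE	0x03840
#define EXAMPLE_QREG(base, n)	((base) + ((n) << 8))
/* EXAMPLE_QREG(EXAMPLE_TXDCTL_BASE, 1) == 0x03928, the old E1000_TXDCTL1 */
/* EXAMPLE_QREG(EXAMPLE_TARC_BASE, 1)   == 0x03940, the old E1000_TARC1   */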
@ -127,53 +129,53 @@ enum e1e_registers {
E1000_LATECOL = 0x04020, /* Late Collision Count - R/clr */
E1000_COLC = 0x04028, /* Collision Count - R/clr */
E1000_DC = 0x04030, /* Defer Count - R/clr */
E1000_TNCRS = 0x04034, /* TX-No CRS - R/clr */
E1000_TNCRS = 0x04034, /* Tx-No CRS - R/clr */
E1000_SEC = 0x04038, /* Sequence Error Count - R/clr */
E1000_CEXTERR = 0x0403C, /* Carrier Extension Error Count - R/clr */
E1000_RLEC = 0x04040, /* Receive Length Error Count - R/clr */
E1000_XONRXC = 0x04048, /* XON RX Count - R/clr */
E1000_XONTXC = 0x0404C, /* XON TX Count - R/clr */
E1000_XOFFRXC = 0x04050, /* XOFF RX Count - R/clr */
E1000_XOFFTXC = 0x04054, /* XOFF TX Count - R/clr */
E1000_FCRUC = 0x04058, /* Flow Control RX Unsupported Count- R/clr */
E1000_PRC64 = 0x0405C, /* Packets RX (64 bytes) - R/clr */
E1000_PRC127 = 0x04060, /* Packets RX (65-127 bytes) - R/clr */
E1000_PRC255 = 0x04064, /* Packets RX (128-255 bytes) - R/clr */
E1000_PRC511 = 0x04068, /* Packets RX (255-511 bytes) - R/clr */
E1000_PRC1023 = 0x0406C, /* Packets RX (512-1023 bytes) - R/clr */
E1000_PRC1522 = 0x04070, /* Packets RX (1024-1522 bytes) - R/clr */
E1000_GPRC = 0x04074, /* Good Packets RX Count - R/clr */
E1000_BPRC = 0x04078, /* Broadcast Packets RX Count - R/clr */
E1000_MPRC = 0x0407C, /* Multicast Packets RX Count - R/clr */
E1000_GPTC = 0x04080, /* Good Packets TX Count - R/clr */
E1000_GORCL = 0x04088, /* Good Octets RX Count Low - R/clr */
E1000_GORCH = 0x0408C, /* Good Octets RX Count High - R/clr */
E1000_GOTCL = 0x04090, /* Good Octets TX Count Low - R/clr */
E1000_GOTCH = 0x04094, /* Good Octets TX Count High - R/clr */
E1000_RNBC = 0x040A0, /* RX No Buffers Count - R/clr */
E1000_RUC = 0x040A4, /* RX Undersize Count - R/clr */
E1000_RFC = 0x040A8, /* RX Fragment Count - R/clr */
E1000_ROC = 0x040AC, /* RX Oversize Count - R/clr */
E1000_RJC = 0x040B0, /* RX Jabber Count - R/clr */
E1000_MGTPRC = 0x040B4, /* Management Packets RX Count - R/clr */
E1000_XONRXC = 0x04048, /* XON Rx Count - R/clr */
E1000_XONTXC = 0x0404C, /* XON Tx Count - R/clr */
E1000_XOFFRXC = 0x04050, /* XOFF Rx Count - R/clr */
E1000_XOFFTXC = 0x04054, /* XOFF Tx Count - R/clr */
E1000_FCRUC = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */
E1000_PRC64 = 0x0405C, /* Packets Rx (64 bytes) - R/clr */
E1000_PRC127 = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */
E1000_PRC255 = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */
E1000_PRC511 = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */
E1000_PRC1023 = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */
E1000_PRC1522 = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */
E1000_GPRC = 0x04074, /* Good Packets Rx Count - R/clr */
E1000_BPRC = 0x04078, /* Broadcast Packets Rx Count - R/clr */
E1000_MPRC = 0x0407C, /* Multicast Packets Rx Count - R/clr */
E1000_GPTC = 0x04080, /* Good Packets Tx Count - R/clr */
E1000_GORCL = 0x04088, /* Good Octets Rx Count Low - R/clr */
E1000_GORCH = 0x0408C, /* Good Octets Rx Count High - R/clr */
E1000_GOTCL = 0x04090, /* Good Octets Tx Count Low - R/clr */
E1000_GOTCH = 0x04094, /* Good Octets Tx Count High - R/clr */
E1000_RNBC = 0x040A0, /* Rx No Buffers Count - R/clr */
E1000_RUC = 0x040A4, /* Rx Undersize Count - R/clr */
E1000_RFC = 0x040A8, /* Rx Fragment Count - R/clr */
E1000_ROC = 0x040AC, /* Rx Oversize Count - R/clr */
E1000_RJC = 0x040B0, /* Rx Jabber Count - R/clr */
E1000_MGTPRC = 0x040B4, /* Management Packets Rx Count - R/clr */
E1000_MGTPDC = 0x040B8, /* Management Packets Dropped Count - R/clr */
E1000_MGTPTC = 0x040BC, /* Management Packets TX Count - R/clr */
E1000_TORL = 0x040C0, /* Total Octets RX Low - R/clr */
E1000_TORH = 0x040C4, /* Total Octets RX High - R/clr */
E1000_TOTL = 0x040C8, /* Total Octets TX Low - R/clr */
E1000_TOTH = 0x040CC, /* Total Octets TX High - R/clr */
E1000_TPR = 0x040D0, /* Total Packets RX - R/clr */
E1000_TPT = 0x040D4, /* Total Packets TX - R/clr */
E1000_PTC64 = 0x040D8, /* Packets TX (64 bytes) - R/clr */
E1000_PTC127 = 0x040DC, /* Packets TX (65-127 bytes) - R/clr */
E1000_PTC255 = 0x040E0, /* Packets TX (128-255 bytes) - R/clr */
E1000_PTC511 = 0x040E4, /* Packets TX (256-511 bytes) - R/clr */
E1000_PTC1023 = 0x040E8, /* Packets TX (512-1023 bytes) - R/clr */
E1000_PTC1522 = 0x040EC, /* Packets TX (1024-1522 Bytes) - R/clr */
E1000_MPTC = 0x040F0, /* Multicast Packets TX Count - R/clr */
E1000_BPTC = 0x040F4, /* Broadcast Packets TX Count - R/clr */
E1000_TSCTC = 0x040F8, /* TCP Segmentation Context TX - R/clr */
E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context TX Fail - R/clr */
E1000_MGTPTC = 0x040BC, /* Management Packets Tx Count - R/clr */
E1000_TORL = 0x040C0, /* Total Octets Rx Low - R/clr */
E1000_TORH = 0x040C4, /* Total Octets Rx High - R/clr */
E1000_TOTL = 0x040C8, /* Total Octets Tx Low - R/clr */
E1000_TOTH = 0x040CC, /* Total Octets Tx High - R/clr */
E1000_TPR = 0x040D0, /* Total Packets Rx - R/clr */
E1000_TPT = 0x040D4, /* Total Packets Tx - R/clr */
E1000_PTC64 = 0x040D8, /* Packets Tx (64 bytes) - R/clr */
E1000_PTC127 = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */
E1000_PTC255 = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */
E1000_PTC511 = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */
E1000_PTC1023 = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */
E1000_PTC1522 = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */
E1000_MPTC = 0x040F0, /* Multicast Packets Tx Count - R/clr */
E1000_BPTC = 0x040F4, /* Broadcast Packets Tx Count - R/clr */
E1000_TSCTC = 0x040F8, /* TCP Segmentation Context Tx - R/clr */
E1000_TSCTFC = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */
E1000_IAC = 0x04100, /* Interrupt Assertion Count */
E1000_ICRXPTC = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
E1000_ICRXATC = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
@ -183,7 +185,7 @@ enum e1e_registers {
E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
E1000_ICRXOC = 0x04124, /* Irq Cause Receiver Overrun Count */
E1000_RXCSUM = 0x05000, /* RX Checksum Control - RW */
E1000_RXCSUM = 0x05000, /* Rx Checksum Control - RW */
E1000_RFCTL = 0x05008, /* Receive Filter Control */
E1000_MTA = 0x05200, /* Multicast Table Array - RW Array */
E1000_RA = 0x05400, /* Receive Address - RW Array */
@ -250,8 +252,8 @@ enum e1e_registers {
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
#define E1000_HICR_EN 0x01 /* Enable bit - RO */
#define E1000_HICR_C 0x02 /* Driver sets this bit when done
* to put command in RAM */
/* Driver sets this bit when done to put command in RAM */
#define E1000_HICR_C 0x02
#define E1000_HICR_FW_RESET_ENABLE 0x40
#define E1000_HICR_FW_RESET 0x80
@ -400,7 +402,7 @@ enum e1000_rev_polarity{
e1000_rev_polarity_undefined = 0xFF
};
enum e1000_fc_mode {
enum e1000_fc_type {
e1000_fc_none = 0,
e1000_fc_rx_pause,
e1000_fc_tx_pause,
@ -685,8 +687,7 @@ struct e1000_mac_operations {
s32 (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
s32 (*led_on)(struct e1000_hw *);
s32 (*led_off)(struct e1000_hw *);
void (*mc_addr_list_update)(struct e1000_hw *, u8 *, u32, u32,
u32);
void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32, u32);
s32 (*reset_hw)(struct e1000_hw *);
s32 (*init_hw)(struct e1000_hw *);
s32 (*setup_link)(struct e1000_hw *);
@ -728,16 +729,12 @@ struct e1000_mac_info {
u8 perm_addr[6];
enum e1000_mac_type type;
enum e1000_fc_mode fc;
enum e1000_fc_mode original_fc;
u32 collision_delta;
u32 ledctl_default;
u32 ledctl_mode1;
u32 ledctl_mode2;
u32 max_frame_size;
u32 mc_filter_type;
u32 min_frame_size;
u32 tx_packet_delta;
u32 txcw;
@ -748,9 +745,6 @@ struct e1000_mac_info {
u16 ifs_step_size;
u16 mta_reg_count;
u16 rar_entry_count;
u16 fc_high_water;
u16 fc_low_water;
u16 fc_pause_time;
u8 forced_speed_duplex;
@ -780,6 +774,8 @@ struct e1000_phy_info {
u32 reset_delay_us; /* in usec */
u32 revision;
enum e1000_media_type media_type;
u16 autoneg_advertised;
u16 autoneg_mask;
u16 cable_length;
@ -792,7 +788,7 @@ struct e1000_phy_info {
bool is_mdix;
bool polarity_correction;
bool speed_downgraded;
bool wait_for_link;
bool autoneg_wait_to_complete;
};
struct e1000_nvm_info {
@ -817,6 +813,16 @@ struct e1000_bus_info {
u16 func;
};
struct e1000_fc_info {
u32 high_water; /* Flow control high-water mark */
u32 low_water; /* Flow control low-water mark */
u16 pause_time; /* Flow control pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
enum e1000_fc_type type; /* Type of flow control */
enum e1000_fc_type original_type;
};
struct e1000_dev_spec_82571 {
bool laa_is_present;
bool alt_mac_addr_is_present;
@ -841,6 +847,7 @@ struct e1000_hw {
u8 __iomem *flash_address;
struct e1000_mac_info mac;
struct e1000_fc_info fc;
struct e1000_phy_info phy;
struct e1000_nvm_info nvm;
struct e1000_bus_info bus;
@ -850,8 +857,6 @@ struct e1000_hw {
struct e1000_dev_spec_82571 e82571;
struct e1000_dev_spec_ich8lan ich8lan;
} dev_spec;
enum e1000_media_type media_type;
};
#ifdef DEBUG

View file

@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2007 Intel Corporation.
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@ -243,8 +243,7 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
u32 sector_end_addr;
u16 i;
/* Can't read flash registers if the register set isn't mapped.
*/
/* Can't read flash registers if the register set isn't mapped. */
if (!hw->flash_address) {
hw_dbg(hw, "ERROR: Flash registers not mapped\n");
return -E1000_ERR_CONFIG;
@ -254,17 +253,21 @@ static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
gfpreg = er32flash(ICH_FLASH_GFPREG);
/* sector_X_addr is a "sector"-aligned address (4096 bytes)
/*
* sector_X_addr is a "sector"-aligned address (4096 bytes)
* Add 1 to sector_end_addr since this sector is included in
* the overall size. */
* the overall size.
*/
sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
/* flash_base_addr is byte-aligned */
nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
/* find total size of the NVM, then cut in half since the total
* size represents two separate NVM banks. */
/*
* find total size of the NVM, then cut in half since the total
* size represents two separate NVM banks.
*/
nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
<< FLASH_SECTOR_ADDR_SHIFT;
nvm->flash_bank_size /= 2;
@ -295,7 +298,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
struct e1000_mac_info *mac = &hw->mac;
/* Set media type function pointer */
hw->media_type = e1000_media_type_copper;
hw->phy.media_type = e1000_media_type_copper;
/* Set mta register count */
mac->mta_reg_count = 32;
@ -313,7 +316,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
return 0;
}
static s32 e1000_get_invariants_ich8lan(struct e1000_adapter *adapter)
static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
s32 rc;
@ -450,7 +453,7 @@ static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
udelay(1);
if (phy->wait_for_link) {
if (phy->autoneg_wait_to_complete) {
hw_dbg(hw, "Waiting for forced speed/duplex link on IFE phy.\n");
ret_val = e1000e_phy_has_link_generic(hw,
@ -496,7 +499,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* Initialize the PHY from the NVM on ICH platforms. This
/*
* Initialize the PHY from the NVM on ICH platforms. This
* is needed due to an issue where the NVM configuration is
* not properly autoloaded after power transitions.
* Therefore, after each PHY reset, we will load the
@ -523,7 +527,8 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
udelay(100);
} while ((!data) && --loop);
/* If basic configuration is incomplete before the above loop
/*
* If basic configuration is incomplete before the above loop
* count reaches 0, loading the configuration from NVM will
* leave the PHY in a bad state possibly resulting in no link.
*/
@ -536,8 +541,10 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
data &= ~E1000_STATUS_LAN_INIT_DONE;
ew32(STATUS, data);
/* Make sure HW does not configure LCD from PHY
* extended configuration before SW configuration */
/*
* Make sure HW does not configure LCD from PHY
* extended configuration before SW configuration
*/
data = er32(EXTCNF_CTRL);
if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
return 0;
@ -551,8 +558,7 @@ static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
/* Configure LCD from extended configuration
* region. */
/* Configure LCD from extended configuration region. */
/* cnf_base_addr is in DWORD */
word_addr = (u16)(cnf_base_addr << 1);
@ -681,8 +687,8 @@ static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data, offset, mask;
/* Polarity is determined based on the reversal feature
* being enabled.
/*
* Polarity is determined based on the reversal feature being enabled.
*/
if (phy->polarity_correction) {
offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
@ -731,8 +737,10 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
ew32(PHY_CTRL, phy_ctrl);
/* Call gig speed drop workaround on LPLU before accessing
* any PHY registers */
/*
* Call gig speed drop workaround on LPLU before accessing
* any PHY registers
*/
if ((hw->mac.type == e1000_ich8lan) &&
(hw->phy.type == e1000_phy_igp_3))
e1000e_gig_downshift_workaround_ich8lan(hw);
@ -747,30 +755,32 @@ static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
ew32(PHY_CTRL, phy_ctrl);
/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
/*
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained. */
* SmartSpeed, so performance is maintained.
*/
if (phy->smart_speed == e1000_smart_speed_on) {
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
&data);
if (ret_val)
return ret_val;
data |= IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
data);
if (ret_val)
return ret_val;
} else if (phy->smart_speed == e1000_smart_speed_off) {
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
&data);
if (ret_val)
return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
data);
if (ret_val)
return ret_val;
}
@ -804,34 +814,32 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
if (!active) {
phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
ew32(PHY_CTRL, phy_ctrl);
/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
/*
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained. */
* SmartSpeed, so performance is maintained.
*/
if (phy->smart_speed == e1000_smart_speed_on) {
ret_val = e1e_rphy(hw,
IGP01E1000_PHY_PORT_CONFIG,
&data);
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
if (ret_val)
return ret_val;
data |= IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw,
IGP01E1000_PHY_PORT_CONFIG,
data);
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
if (ret_val)
return ret_val;
} else if (phy->smart_speed == e1000_smart_speed_off) {
ret_val = e1e_rphy(hw,
IGP01E1000_PHY_PORT_CONFIG,
&data);
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
if (ret_val)
return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw,
IGP01E1000_PHY_PORT_CONFIG,
data);
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
if (ret_val)
return ret_val;
}
@ -841,23 +849,21 @@ static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
ew32(PHY_CTRL, phy_ctrl);
/* Call gig speed drop workaround on LPLU before accessing
* any PHY registers */
/*
* Call gig speed drop workaround on LPLU before accessing
* any PHY registers
*/
if ((hw->mac.type == e1000_ich8lan) &&
(hw->phy.type == e1000_phy_igp_3))
e1000e_gig_downshift_workaround_ich8lan(hw);
/* When LPLU is enabled, we should disable SmartSpeed */
ret_val = e1e_rphy(hw,
IGP01E1000_PHY_PORT_CONFIG,
&data);
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
if (ret_val)
return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw,
IGP01E1000_PHY_PORT_CONFIG,
data);
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
}
return 0;
@ -944,7 +950,8 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
/* Either we should have a hardware SPI cycle in progress
/*
* Either we should have a hardware SPI cycle in progress
* bit to check against, in order to start a new cycle or
* FDONE bit should be changed in the hardware so that it
* is 1 after hardware reset, which can then be used as an
@ -953,15 +960,19 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
*/
if (hsfsts.hsf_status.flcinprog == 0) {
/* There is no cycle running at present,
* so we can start a cycle */
/* Begin by setting Flash Cycle Done. */
/*
* There is no cycle running at present,
* so we can start a cycle
* Begin by setting Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
ret_val = 0;
} else {
/* otherwise poll for sometime so the current
* cycle has a chance to end before giving up. */
/*
* otherwise poll for sometime so the current
* cycle has a chance to end before giving up.
*/
for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.flcinprog == 0) {
@ -971,8 +982,10 @@ static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
udelay(1);
}
if (ret_val == 0) {
/* Successful in waiting for previous cycle to timeout,
* now set the Flash Cycle Done. */
/*
* Successful in waiting for previous cycle to timeout,
* now set the Flash Cycle Done.
*/
hsfsts.hsf_status.flcdone = 1;
ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
} else {
@ -1077,10 +1090,12 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ret_val = e1000_flash_cycle_ich8lan(hw,
ICH_FLASH_READ_COMMAND_TIMEOUT);
/* Check if FCERR is set to 1, if set to 1, clear it
/*
* Check if FCERR is set to 1, if set to 1, clear it
* and try the whole sequence a few more times, else
* read in (shift in) the Flash Data0, the order is
* least significant byte first msb to lsb */
* least significant byte first msb to lsb
*/
if (ret_val == 0) {
flash_data = er32flash(ICH_FLASH_FDATA0);
if (size == 1) {
@ -1090,7 +1105,8 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
}
break;
} else {
/* If we've gotten here, then things are probably
/*
* If we've gotten here, then things are probably
* completely hosed, but if the error condition is
* detected, it won't hurt to give it another try...
* ICH_FLASH_CYCLE_REPEAT_COUNT times.
@ -1168,18 +1184,20 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
ret_val = e1000e_update_nvm_checksum_generic(hw);
if (ret_val)
return ret_val;;
return ret_val;
if (nvm->type != e1000_nvm_flash_sw)
return ret_val;;
return ret_val;
ret_val = e1000_acquire_swflag_ich8lan(hw);
if (ret_val)
return ret_val;;
return ret_val;
/* We're writing to the opposite bank so if we're on bank 1,
/*
* We're writing to the opposite bank so if we're on bank 1,
* write to bank 0 etc. We also need to erase the segment that
* is going to be written */
* is going to be written
*/
if (!(er32(EECD) & E1000_EECD_SEC1VAL)) {
new_bank_offset = nvm->flash_bank_size;
old_bank_offset = 0;
@ -1191,9 +1209,11 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
}
for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
/* Determine whether to write the value stored
/*
* Determine whether to write the value stored
* in the other NVM bank or a modified value stored
* in the shadow RAM */
* in the shadow RAM
*/
if (dev_spec->shadow_ram[i].modified) {
data = dev_spec->shadow_ram[i].value;
} else {
@ -1202,12 +1222,14 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
&data);
}
/* If the word is 0x13, then make sure the signature bits
/*
* If the word is 0x13, then make sure the signature bits
* (15:14) are 11b until the commit has completed.
* This will allow us to write 10b which indicates the
* signature is valid. We want to do this after the write
* has completed so that we don't mark the segment valid
* while the write is still in progress */
* while the write is still in progress
*/
if (i == E1000_ICH_NVM_SIG_WORD)
data |= E1000_ICH_NVM_SIG_MASK;
@ -1230,18 +1252,22 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
break;
}
/* Don't bother writing the segment valid bits if sector
* programming failed. */
/*
* Don't bother writing the segment valid bits if sector
* programming failed.
*/
if (ret_val) {
hw_dbg(hw, "Flash commit failed.\n");
e1000_release_swflag_ich8lan(hw);
return ret_val;
}
/* Finally validate the new segment by setting bit 15:14
/*
* Finally validate the new segment by setting bit 15:14
* to 10b in word 0x13 , this can be done without an
* erase as well since these bits are 11 to start with
* and we need to change bit 14 to 0b */
* and we need to change bit 14 to 0b
*/
act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
e1000_read_flash_word_ich8lan(hw, act_offset, &data);
data &= 0xBFFF;
@ -1253,10 +1279,12 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
return ret_val;
}
/* And invalidate the previously valid segment by setting
/*
* And invalidate the previously valid segment by setting
* its signature word (0x13) high_byte to 0b. This can be
* done without an erase because flash erase sets all bits
* to 1's. We can write 1's to 0's without an erase */
* to 1's. We can write 1's to 0's without an erase
*/
act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
if (ret_val) {
@ -1272,7 +1300,8 @@ static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
e1000_release_swflag_ich8lan(hw);
/* Reload the EEPROM, or else modifications will not appear
/*
* Reload the EEPROM, or else modifications will not appear
* until after the next adapter reset.
*/
e1000e_reload_nvm(hw);
@ -1294,7 +1323,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
s32 ret_val;
u16 data;
/* Read 0x19 and check bit 6. If this bit is 0, the checksum
/*
* Read 0x19 and check bit 6. If this bit is 0, the checksum
* needs to be fixed. This bit is an indication that the NVM
* was prepared by OEM software and did not calculate the
* checksum...a likely scenario.
@ -1364,14 +1394,17 @@ static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
ew32flash(ICH_FLASH_FDATA0, flash_data);
/* check if FCERR is set to 1 , if set to 1, clear it
* and try the whole sequence a few more times else done */
/*
* check if FCERR is set to 1 , if set to 1, clear it
* and try the whole sequence a few more times else done
*/
ret_val = e1000_flash_cycle_ich8lan(hw,
ICH_FLASH_WRITE_COMMAND_TIMEOUT);
if (!ret_val)
break;
/* If we're here, then things are most likely
/*
* If we're here, then things are most likely
* completely hosed, but if the error condition
* is detected, it won't hurt to give it another
* try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
@ -1462,9 +1495,10 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
/* Determine HW Sector size: Read BERASE bits of hw flash status
* register */
/* 00: The Hw sector is 256 bytes, hence we need to erase 16
/*
* Determine HW Sector size: Read BERASE bits of hw flash status
* register
* 00: The Hw sector is 256 bytes, hence we need to erase 16
* consecutive sectors. The start index for the nth Hw sector
* can be calculated as = bank * 4096 + n * 256
* 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
@ -1511,13 +1545,16 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
if (ret_val)
return ret_val;
/* Write a value 11 (block Erase) in Flash
* Cycle field in hw flash control */
/*
* Write a value 11 (block Erase) in Flash
* Cycle field in hw flash control
*/
hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
/* Write the last 24 bits of an index within the
/*
* Write the last 24 bits of an index within the
* block into Flash Linear address field in Flash
* Address.
*/
@ -1529,13 +1566,14 @@ static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
if (ret_val == 0)
break;
/* Check if FCERR is set to 1. If 1,
/*
* Check if FCERR is set to 1. If 1,
* clear it and try the whole sequence
* a few more times else Done */
* a few more times else Done
*/
hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
if (hsfsts.hsf_status.flcerr == 1)
/* repeat for some time before
* giving up */
/* repeat for some time before giving up */
continue;
else if (hsfsts.hsf_status.flcdone == 0)
return ret_val;
@ -1585,7 +1623,8 @@ static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
ret_val = e1000e_get_bus_info_pcie(hw);
/* ICH devices are "PCI Express"-ish. They have
/*
* ICH devices are "PCI Express"-ish. They have
* a configuration space, but do not contain
* PCI Express Capability registers, so bus width
* must be hardcoded.
@ -1608,7 +1647,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
u32 ctrl, icr, kab;
s32 ret_val;
/* Prevent the PCI-E bus from sticking if there is no TLP connection
/*
* Prevent the PCI-E bus from sticking if there is no TLP connection
* on the last TLP read/write transaction when MAC is reset.
*/
ret_val = e1000e_disable_pcie_master(hw);
@ -1619,7 +1659,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
hw_dbg(hw, "Masking off all interrupts\n");
ew32(IMC, 0xffffffff);
/* Disable the Transmit and Receive units. Then delay to allow
/*
* Disable the Transmit and Receive units. Then delay to allow
* any pending transactions to complete before we hit the MAC
* with the global reset.
*/
@ -1640,7 +1681,8 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
ctrl = er32(CTRL);
if (!e1000_check_reset_block(hw)) {
/* PHY HW reset requires MAC CORE reset at the same
/*
* PHY HW reset requires MAC CORE reset at the same
* time to make sure the interface between MAC and the
* external PHY is reset.
*/
@ -1711,21 +1753,23 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
ret_val = e1000_setup_link_ich8lan(hw);
/* Set the transmit descriptor write-back policy for both queues */
txdctl = er32(TXDCTL);
txdctl = er32(TXDCTL(0));
txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB;
txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
ew32(TXDCTL, txdctl);
txdctl = er32(TXDCTL1);
ew32(TXDCTL(0), txdctl);
txdctl = er32(TXDCTL(1));
txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
E1000_TXDCTL_FULL_TX_DESC_WB;
txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
ew32(TXDCTL1, txdctl);
ew32(TXDCTL(1), txdctl);
/* ICH8 has opposite polarity of no_snoop bits.
* By default, we should use snoop behavior. */
/*
* ICH8 has opposite polarity of no_snoop bits.
* By default, we should use snoop behavior.
*/
if (mac->type == e1000_ich8lan)
snoop = PCIE_ICH8_SNOOP_ALL;
else
@ -1736,7 +1780,8 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
ew32(CTRL_EXT, ctrl_ext);
/* Clear all of the statistics registers (clear on read). It is
/*
* Clear all of the statistics registers (clear on read). It is
* important that we do this after we have tried to establish link
* because the symbol error count will increment wildly if there
* is no link.
@ -1762,30 +1807,30 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
ew32(CTRL_EXT, reg);
/* Transmit Descriptor Control 0 */
reg = er32(TXDCTL);
reg = er32(TXDCTL(0));
reg |= (1 << 22);
ew32(TXDCTL, reg);
ew32(TXDCTL(0), reg);
/* Transmit Descriptor Control 1 */
reg = er32(TXDCTL1);
reg = er32(TXDCTL(1));
reg |= (1 << 22);
ew32(TXDCTL1, reg);
ew32(TXDCTL(1), reg);
/* Transmit Arbitration Control 0 */
reg = er32(TARC0);
reg = er32(TARC(0));
if (hw->mac.type == e1000_ich8lan)
reg |= (1 << 28) | (1 << 29);
reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
ew32(TARC0, reg);
ew32(TARC(0), reg);
/* Transmit Arbitration Control 1 */
reg = er32(TARC1);
reg = er32(TARC(1));
if (er32(TCTL) & E1000_TCTL_MULR)
reg &= ~(1 << 28);
else
reg |= (1 << 28);
reg |= (1 << 24) | (1 << 26) | (1 << 30);
ew32(TARC1, reg);
ew32(TARC(1), reg);
/* Device Status */
if (hw->mac.type == e1000_ich8lan) {
@ -1807,29 +1852,29 @@ static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
**/
static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val;
if (e1000_check_reset_block(hw))
return 0;
/* ICH parts do not have a word in the NVM to determine
/*
* ICH parts do not have a word in the NVM to determine
* the default flow control setting, so we explicitly
* set it to full.
*/
if (mac->fc == e1000_fc_default)
mac->fc = e1000_fc_full;
if (hw->fc.type == e1000_fc_default)
hw->fc.type = e1000_fc_full;
mac->original_fc = mac->fc;
hw->fc.original_type = hw->fc.type;
hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc);
hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type);
/* Continue to configure the copper link. */
ret_val = e1000_setup_copper_link_ich8lan(hw);
if (ret_val)
return ret_val;
ew32(FCTTV, mac->fc_pause_time);
ew32(FCTTV, hw->fc.pause_time);
return e1000e_set_fc_watermarks(hw);
}
@ -1853,9 +1898,11 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
ew32(CTRL, ctrl);
/* Set the mac to wait the maximum time between each iteration
/*
* Set the mac to wait the maximum time between each iteration
* and increase the max iterations when polling the phy;
* this fixes erroneous timeouts at 10Mbps. */
* this fixes erroneous timeouts at 10Mbps.
*/
ret_val = e1000e_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
if (ret_val)
return ret_val;
@ -1882,7 +1929,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
* @speed: pointer to store current link speed
* @duplex: pointer to store the current link duplex
*
* Calls the generic get_speed_and_duplex to retreive the current link
* Calls the generic get_speed_and_duplex to retrieve the current link
* information and then calls the Kumeran lock loss workaround for links at
* gigabit speeds.
**/
@ -1930,9 +1977,11 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
if (!dev_spec->kmrn_lock_loss_workaround_enabled)
return 0;
/* Make sure link is up before proceeding. If not just return.
/*
* Make sure link is up before proceeding. If not just return.
* Attempting this while link is negotiating fouled up link
* stability */
* stability
*/
ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
if (!link)
return 0;
@ -1961,8 +2010,10 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
ew32(PHY_CTRL, phy_ctrl);
/* Call gig speed drop workaround on Gig disable before accessing
* any PHY registers */
/*
* Call gig speed drop workaround on Gig disable before accessing
* any PHY registers
*/
e1000e_gig_downshift_workaround_ich8lan(hw);
/* unable to acquire PCS lock */
@ -1970,7 +2021,7 @@ static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
}
/**
* e1000_set_kmrn_lock_loss_workaound_ich8lan - Set Kumeran workaround state
* e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
* @hw: pointer to the HW structure
* @state: boolean value used to set the current Kumeran workaround state
*
@ -2017,8 +2068,10 @@ void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
ew32(PHY_CTRL, reg);
/* Call gig speed drop workaround on Gig disable before
* accessing any PHY registers */
/*
* Call gig speed drop workaround on Gig disable before
* accessing any PHY registers
*/
if (hw->mac.type == e1000_ich8lan)
e1000e_gig_downshift_workaround_ich8lan(hw);
@ -2158,7 +2211,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
.get_link_up_info = e1000_get_link_up_info_ich8lan,
.led_on = e1000_led_on_ich8lan,
.led_off = e1000_led_off_ich8lan,
.mc_addr_list_update = e1000e_mc_addr_list_update_generic,
.update_mc_addr_list = e1000e_update_mc_addr_list_generic,
.reset_hw = e1000_reset_hw_ich8lan,
.init_hw = e1000_init_hw_ich8lan,
.setup_link = e1000_setup_link_ich8lan,
@ -2200,7 +2253,7 @@ struct e1000_info e1000_ich8_info = {
| FLAG_HAS_FLASH
| FLAG_APME_IN_WUC,
.pba = 8,
.get_invariants = e1000_get_invariants_ich8lan,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
.nvm_ops = &ich8_nvm_ops,
@ -2217,7 +2270,7 @@ struct e1000_info e1000_ich9_info = {
| FLAG_HAS_FLASH
| FLAG_APME_IN_WUC,
.pba = 10,
.get_invariants = e1000_get_invariants_ich8lan,
.get_variants = e1000_get_variants_ich8lan,
.mac_ops = &ich8_mac_ops,
.phy_ops = &ich8_phy_ops,
.nvm_ops = &ich8_nvm_ops,

View file

@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2007 Intel Corporation.
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@ -43,8 +43,8 @@ enum e1000_mng_mode {
#define E1000_FACTPS_MNGCG 0x20000000
#define E1000_IAMT_SIGNATURE 0x544D4149 /* Intel(R) Active Management
* Technology signature */
/* Intel(R) Active Management Technology signature */
#define E1000_IAMT_SIGNATURE 0x544D4149
/**
* e1000e_get_bus_info_pcie - Get PCIe bus information
@ -142,7 +142,8 @@ void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
/* HW expects these in little endian so we reverse the byte order
/*
* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
rar_low = ((u32) addr[0] |
@ -171,7 +172,8 @@ static void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
{
u32 hash_bit, hash_reg, mta;
/* The MTA is a register array of 32-bit registers. It is
/*
* The MTA is a register array of 32-bit registers. It is
* treated like an array of (32*mta_reg_count) bits. We want to
* set bit BitArray[hash_value]. So we figure out what register
* the bit is in, read it, OR in the new bit, then write
@ -208,12 +210,15 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
/* Register count multiplied by bits per register */
hash_mask = (hw->mac.mta_reg_count * 32) - 1;
/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask. */
/*
* For a mc_filter_type of 0, bit_shift is the number of left-shifts
* where 0xFF would still fall within the hash mask.
*/
while (hash_mask >> bit_shift != 0xFF)
bit_shift++;
/* The portion of the address that is used for the hash table
/*
* The portion of the address that is used for the hash table
* is determined by the mc_filter_type setting.
* The algorithm is such that there is a total of 8 bits of shifting.
* The bit_shift for a mc_filter_type of 0 represents the number of
@ -224,8 +229,8 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* cases are a variation of this algorithm...essentially raising the
* number of bits to shift mc_addr[5] left, while still keeping the
* 8-bit shifting total.
*/
/* For example, given the following Destination MAC Address and an
*
* For example, given the following Destination MAC Address and an
* mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
* we can see that the bit_shift for case 0 is 4. These are the hash
* values resulting from each mc_filter_type...
@ -260,7 +265,7 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
}
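The reworked comments above describe the hash only in words; a minimal sketch of the computation for the default mc_filter_type of 0 (an assumed shape of the generic helper, not code from this commit) makes the bit_shift example concrete:

/*
 * Sketch for mc_filter_type 0 only (assumed, not copied from the commit):
 * 8 bits total are taken from mc_addr[4]/mc_addr[5], split so the result
 * fits under the MTA hash mask.
 */
static u32 example_hash_mc_addr(u32 mta_reg_count, const u8 *mc_addr)
{
	u32 hash_mask = (mta_reg_count * 32) - 1;	/* 0xFFF for 128 registers */
	u8 bit_shift = 0;

	while (hash_mask >> bit_shift != 0xFF)		/* 4 for a 0xFFF mask */
		bit_shift++;

	/* non-zero filter types shift mc_addr[5] further left, as described above */
	return hash_mask & (((u32)mc_addr[4] >> (8 - bit_shift)) |
			    ((u32)mc_addr[5] << bit_shift));
}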
/**
* e1000e_mc_addr_list_update_generic - Update Multicast addresses
* e1000e_update_mc_addr_list_generic - Update Multicast addresses
* @hw: pointer to the HW structure
* @mc_addr_list: array of multicast addresses to program
* @mc_addr_count: number of multicast addresses to program
@ -272,14 +277,15 @@ static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* The parameter rar_count will usually be hw->mac.rar_entry_count
* unless there are workarounds that change this.
**/
void e1000e_mc_addr_list_update_generic(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count)
void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
u8 *mc_addr_list, u32 mc_addr_count,
u32 rar_used_count, u32 rar_count)
{
u32 hash_value;
u32 i;
/* Load the first set of multicast addresses into the exact
/*
* Load the first set of multicast addresses into the exact
* filters (RAR). If there are not enough to fill the RAR
* array, clear the filters.
*/
@ -375,7 +381,8 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
s32 ret_val;
bool link;
/* We only want to go out to the PHY registers to see if Auto-Neg
/*
* We only want to go out to the PHY registers to see if Auto-Neg
* has completed and/or if our link status has changed. The
* get_link_status flag is set upon receiving a Link Status
* Change or Rx Sequence Error interrupt.
@ -383,7 +390,8 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
if (!mac->get_link_status)
return 0;
/* First we want to see if the MII Status Register reports
/*
* First we want to see if the MII Status Register reports
* link. If so, then we want to get the current speed/duplex
* of the PHY.
*/
@ -396,11 +404,14 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
mac->get_link_status = 0;
/* Check if there was DownShift, must be checked
* immediately after link-up */
/*
* Check if there was DownShift, must be checked
* immediately after link-up
*/
e1000e_check_downshift(hw);
/* If we are forcing speed/duplex, then we simply return since
/*
* If we are forcing speed/duplex, then we simply return since
* we have already determined whether we have link or not.
*/
if (!mac->autoneg) {
@ -408,13 +419,15 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
return ret_val;
}
/* Auto-Neg is enabled. Auto Speed Detection takes care
/*
* Auto-Neg is enabled. Auto Speed Detection takes care
* of MAC speed/duplex configuration. So we only need to
* configure Collision Distance in the MAC.
*/
e1000e_config_collision_dist(hw);
/* Configure Flow Control now that Auto-Neg has completed.
/*
* Configure Flow Control now that Auto-Neg has completed.
* First, we need to restore the desired flow control
* settings because we may have had to re-autoneg with a
* different link partner.
@ -446,7 +459,8 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
status = er32(STATUS);
rxcw = er32(RXCW);
/* If we don't have link (auto-negotiation failed or link partner
/*
* If we don't have link (auto-negotiation failed or link partner
* cannot auto-negotiate), the cable is plugged in (we have signal),
* and our link partner is not trying to auto-negotiate with us (we
* are receiving idles or data), we need to force link up. We also
@ -477,7 +491,8 @@ s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
return ret_val;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
/* If we are forcing link and we are receiving /C/ ordered
/*
* If we are forcing link and we are receiving /C/ ordered
* sets, re-enable auto-negotiation in the TXCW register
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
@ -511,7 +526,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
status = er32(STATUS);
rxcw = er32(RXCW);
/* If we don't have link (auto-negotiation failed or link partner
/*
* If we don't have link (auto-negotiation failed or link partner
* cannot auto-negotiate), and our link partner is not trying to
* auto-negotiate with us (we are receiving idles or data),
* we need to force link up. We also need to give auto-negotiation
@ -540,7 +556,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
return ret_val;
}
} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
/* If we are forcing link and we are receiving /C/ ordered
/*
* If we are forcing link and we are receiving /C/ ordered
* sets, re-enable auto-negotiation in the TXCW register
* and disable forced link in the Device Control register
* in an attempt to auto-negotiate with our link partner.
@ -551,7 +568,8 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
mac->serdes_has_link = 1;
} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
/* If we force link for non-auto-negotiation switch, check
/*
* If we force link for non-auto-negotiation switch, check
* link status based on MAC synchronization for internal
* serdes media type.
*/
@ -585,11 +603,11 @@ s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
**/
static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val;
u16 nvm_data;
/* Read and store word 0x0F of the EEPROM. This word contains bits
/*
* Read and store word 0x0F of the EEPROM. This word contains bits
* that determine the hardware's default PAUSE (flow control) mode,
* a bit that determines whether the HW defaults to enabling or
* disabling auto-negotiation, and the direction of the
@ -605,12 +623,12 @@ static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
}
if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
mac->fc = e1000_fc_none;
hw->fc.type = e1000_fc_none;
else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
NVM_WORD0F_ASM_DIR)
mac->fc = e1000_fc_tx_pause;
hw->fc.type = e1000_fc_tx_pause;
else
mac->fc = e1000_fc_full;
hw->fc.type = e1000_fc_full;
return 0;
}
@ -630,7 +648,8 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
struct e1000_mac_info *mac = &hw->mac;
s32 ret_val;
/* In the case of the phy reset being blocked, we already have a link.
/*
* In the case of the phy reset being blocked, we already have a link.
* We do not need to set it up again.
*/
if (e1000_check_reset_block(hw))
@ -640,26 +659,28 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
* If flow control is set to default, set flow control based on
* the EEPROM flow control settings.
*/
if (mac->fc == e1000_fc_default) {
if (hw->fc.type == e1000_fc_default) {
ret_val = e1000_set_default_fc_generic(hw);
if (ret_val)
return ret_val;
}
/* We want to save off the original Flow Control configuration just
/*
* We want to save off the original Flow Control configuration just
* in case we get disconnected and then reconnected into a different
* hub or switch with different Flow Control capabilities.
*/
mac->original_fc = mac->fc;
hw->fc.original_type = hw->fc.type;
hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", mac->fc);
hw_dbg(hw, "After fix-ups FlowControl is now = %x\n", hw->fc.type);
/* Call the necessary media_type subroutine to configure the link. */
ret_val = mac->ops.setup_physical_interface(hw);
if (ret_val)
return ret_val;
/* Initialize the flow control address, type, and PAUSE timer
/*
* Initialize the flow control address, type, and PAUSE timer
* registers to their default values. This is done even if flow
* control is disabled, because it does not hurt anything to
* initialize these registers.
@ -669,7 +690,7 @@ s32 e1000e_setup_link(struct e1000_hw *hw)
ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
ew32(FCTTV, mac->fc_pause_time);
ew32(FCTTV, hw->fc.pause_time);
return e1000e_set_fc_watermarks(hw);
}
@ -686,7 +707,8 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
struct e1000_mac_info *mac = &hw->mac;
u32 txcw;
/* Check for a software override of the flow control settings, and
/*
* Check for a software override of the flow control settings, and
* setup the device accordingly. If auto-negotiation is enabled, then
* software will have to set the "PAUSE" bits to the correct value in
* the Transmit Config Word Register (TXCW) and re-start auto-
@ -700,31 +722,34 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
* but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames but we
* do not support receiving pause frames).
* 3: Both Rx and TX flow control (symmetric) are enabled.
* 3: Both Rx and Tx flow control (symmetric) are enabled.
*/
switch (mac->fc) {
switch (hw->fc.type) {
case e1000_fc_none:
/* Flow control completely disabled by a software over-ride. */
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
break;
case e1000_fc_rx_pause:
/* RX Flow control is enabled and TX Flow control is disabled
/*
* Rx Flow control is enabled and Tx Flow control is disabled
* by a software over-ride. Since there really isn't a way to
* advertise that we are capable of RX Pause ONLY, we will
* advertise that we support both symmetric and asymmetric RX
* advertise that we are capable of Rx Pause ONLY, we will
* advertise that we support both symmetric and asymmetric Rx
* PAUSE. Later, we will disable the adapter's ability to send
* PAUSE frames.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
break;
case e1000_fc_tx_pause:
/* TX Flow control is enabled, and RX Flow control is disabled,
/*
* Tx Flow control is enabled, and Rx Flow control is disabled,
* by a software over-ride.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
break;
case e1000_fc_full:
/* Flow control (both RX and TX) is enabled by a software
/*
* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
@ -754,7 +779,8 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
u32 i, status;
s32 ret_val;
/* If we have a signal (the cable is plugged in, or assumed true for
/*
* If we have a signal (the cable is plugged in, or assumed true for
* serdes media) then poll for a "Link-Up" indication in the Device
* Status Register. Time-out if a link isn't seen in 500 milliseconds
* seconds (Auto-negotiation should complete in less than 500
@ -769,7 +795,8 @@ static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
if (i == FIBER_LINK_UP_LIMIT) {
hw_dbg(hw, "Never got a valid link from auto-neg!!!\n");
mac->autoneg_failed = 1;
/* AutoNeg failed to achieve a link, so we'll call
/*
* AutoNeg failed to achieve a link, so we'll call
* mac->check_for_link. This routine will force the
* link up if we detect a signal. This will allow us to
* communicate with non-autonegotiating link partners.
@ -811,7 +838,8 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* Since auto-negotiation is enabled, take the link out of reset (the
/*
* Since auto-negotiation is enabled, take the link out of reset (the
* link will be in reset, because we previously reset the chip). This
* will restart auto-negotiation. If auto-negotiation is successful
* then the link-up status bit will be set and the flow control enable
@ -823,11 +851,12 @@ s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
e1e_flush();
msleep(1);
/* For these adapters, the SW defineable pin 1 is set when the optics
/*
* For these adapters, the SW definable pin 1 is set when the optics
* detect a signal. If we have a signal, then poll for a "Link-Up"
* indication.
*/
if (hw->media_type == e1000_media_type_internal_serdes ||
if (hw->phy.media_type == e1000_media_type_internal_serdes ||
(er32(CTRL) & E1000_CTRL_SWDPIN1)) {
ret_val = e1000_poll_fiber_serdes_link_generic(hw);
} else {
@ -864,27 +893,28 @@ void e1000e_config_collision_dist(struct e1000_hw *hw)
*
* Sets the flow control high/low threshold (watermark) registers. If
* flow control XON frame transmission is enabled, then set XON frame
* tansmission as well.
* transmission as well.
**/
s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
u32 fcrtl = 0, fcrth = 0;
/* Set the flow control receive threshold registers. Normally,
/*
* Set the flow control receive threshold registers. Normally,
* these registers will be set to a default threshold that may be
* adjusted later by the driver's runtime code. However, if the
* ability to transmit pause frames is not enabled, then these
* registers will be set to 0.
*/
if (mac->fc & e1000_fc_tx_pause) {
/* We need to set up the Receive Threshold high and low water
if (hw->fc.type & e1000_fc_tx_pause) {
/*
* We need to set up the Receive Threshold high and low water
* marks as well as (optionally) enabling the transmission of
* XON frames.
*/
fcrtl = mac->fc_low_water;
fcrtl = hw->fc.low_water;
fcrtl |= E1000_FCRTL_XONE;
fcrth = mac->fc_high_water;
fcrth = hw->fc.high_water;
}
ew32(FCRTL, fcrtl);
ew32(FCRTH, fcrth);
@ -904,18 +934,18 @@ s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
**/
s32 e1000e_force_mac_fc(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
u32 ctrl;
ctrl = er32(CTRL);
/* Because we didn't get link via the internal auto-negotiation
/*
* Because we didn't get link via the internal auto-negotiation
* mechanism (we either forced link or we got link via PHY
* auto-neg), we have to manually enable/disable transmit an
* receive flow control.
*
* The "Case" statement below enables/disable flow control
* according to the "mac->fc" parameter.
* according to the "hw->fc.type" parameter.
*
* The possible values of the "fc" parameter are:
* 0: Flow control is completely disabled
@ -923,12 +953,12 @@ s32 e1000e_force_mac_fc(struct e1000_hw *hw)
* frames but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
* frames but we do not receive pause frames).
* 3: Both Rx and TX flow control (symmetric) is enabled.
* 3: Both Rx and Tx flow control (symmetric) is enabled.
* other: No other values should be possible at this point.
*/
hw_dbg(hw, "mac->fc = %u\n", mac->fc);
hw_dbg(hw, "hw->fc.type = %u\n", hw->fc.type);
switch (mac->fc) {
switch (hw->fc.type) {
case e1000_fc_none:
ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
break;
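Only the e1000_fc_none arm of the switch is visible in the hunk above; for orientation, the mapping of the four flow-control modes onto the CTRL register's receive/transmit flow-control enable bits looks roughly like the sketch below (an assumption for illustration, not text from the commit):

/* Sketch, not copied from the commit: how each mode toggles RFCE/TFCE. */
static u32 example_apply_fc_to_ctrl(u32 ctrl, enum e1000_fc_type type)
{
	switch (type) {
	case e1000_fc_none:		/* neither direction enabled */
		ctrl &= ~(E1000_CTRL_TFCE | E1000_CTRL_RFCE);
		break;
	case e1000_fc_rx_pause:		/* honour received PAUSE frames only */
		ctrl &= ~E1000_CTRL_TFCE;
		ctrl |= E1000_CTRL_RFCE;
		break;
	case e1000_fc_tx_pause:		/* send PAUSE frames, ignore incoming */
		ctrl &= ~E1000_CTRL_RFCE;
		ctrl |= E1000_CTRL_TFCE;
		break;
	case e1000_fc_full:		/* symmetric flow control */
	default:
		ctrl |= E1000_CTRL_TFCE | E1000_CTRL_RFCE;
		break;
	}
	return ctrl;
}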
@ -970,16 +1000,17 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
u16 speed, duplex;
/* Check for the case where we have fiber media and auto-neg failed
/*
* Check for the case where we have fiber media and auto-neg failed
* so we had to force link. In this case, we need to force the
* configuration of the MAC to match the "fc" parameter.
*/
if (mac->autoneg_failed) {
if (hw->media_type == e1000_media_type_fiber ||
hw->media_type == e1000_media_type_internal_serdes)
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes)
ret_val = e1000e_force_mac_fc(hw);
} else {
if (hw->media_type == e1000_media_type_copper)
if (hw->phy.media_type == e1000_media_type_copper)
ret_val = e1000e_force_mac_fc(hw);
}
@ -988,13 +1019,15 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
return ret_val;
}
/* Check for the case where we have copper media and auto-neg is
/*
* Check for the case where we have copper media and auto-neg is
* enabled. In this case, we need to check and see if Auto-Neg
* has completed, and if so, how the PHY and link partner has
* flow control configured.
*/
if ((hw->media_type == e1000_media_type_copper) && mac->autoneg) {
/* Read the MII Status Register and check to see if AutoNeg
if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
/*
* Read the MII Status Register and check to see if AutoNeg
* has completed. We read this twice because this reg has
* some "sticky" (latched) bits.
*/
@ -1011,7 +1044,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
return ret_val;
}
/* The AutoNeg process has completed, so we now need to
/*
* The AutoNeg process has completed, so we now need to
* read both the Auto Negotiation Advertisement
* Register (Address 4) and the Auto_Negotiation Base
* Page Ability Register (Address 5) to determine how
@ -1024,7 +1058,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* Two bits in the Auto Negotiation Advertisement Register
/*
* Two bits in the Auto Negotiation Advertisement Register
* (Address 4) and two bits in the Auto Negotiation Base
* Page Ability Register (Address 5) determine flow control
* for both the PHY and the link partner. The following
@ -1045,8 +1080,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
* 1 | 1 | 0 | 0 | e1000_fc_none
* 1 | 1 | 0 | 1 | e1000_fc_rx_pause
*
*/
/* Are both PAUSE bits set to 1? If so, this implies
*
* Are both PAUSE bits set to 1? If so, this implies
* Symmetric Flow Control is enabled at both ends. The
* ASM_DIR bits are irrelevant per the spec.
*
@ -1060,22 +1095,24 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
*/
if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
/* Now we need to check if the user selected RX ONLY
/*
* Now we need to check if the user selected Rx ONLY
* of pause frames. In this case, we had to advertise
* FULL flow control because we could not advertise RX
* FULL flow control because we could not advertise Rx
* ONLY. Hence, we must now check to see if we need to
* turn OFF the TRANSMISSION of PAUSE frames.
*/
if (mac->original_fc == e1000_fc_full) {
mac->fc = e1000_fc_full;
if (hw->fc.original_type == e1000_fc_full) {
hw->fc.type = e1000_fc_full;
hw_dbg(hw, "Flow Control = FULL.\r\n");
} else {
mac->fc = e1000_fc_rx_pause;
hw->fc.type = e1000_fc_rx_pause;
hw_dbg(hw, "Flow Control = "
"RX PAUSE frames only.\r\n");
}
}
/* For receiving PAUSE frames ONLY.
/*
* For receiving PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@ -1087,10 +1124,11 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
(mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
mac->fc = e1000_fc_tx_pause;
hw_dbg(hw, "Flow Control = TX PAUSE frames only.\r\n");
hw->fc.type = e1000_fc_tx_pause;
hw_dbg(hw, "Flow Control = Tx PAUSE frames only.\r\n");
}
/* For transmitting PAUSE frames ONLY.
/*
* For transmitting PAUSE frames ONLY.
*
* LOCAL DEVICE | LINK PARTNER
* PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
@ -1102,18 +1140,19 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
(mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
!(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
(mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
mac->fc = e1000_fc_rx_pause;
hw_dbg(hw, "Flow Control = RX PAUSE frames only.\r\n");
hw->fc.type = e1000_fc_rx_pause;
hw_dbg(hw, "Flow Control = Rx PAUSE frames only.\r\n");
} else {
/*
* Per the IEEE spec, at this point flow control
* should be disabled.
*/
mac->fc = e1000_fc_none;
hw->fc.type = e1000_fc_none;
hw_dbg(hw, "Flow Control = NONE.\r\n");
}
/* Now we need to do one last check... If we auto-
/*
* Now we need to do one last check... If we auto-
* negotiated to HALF DUPLEX, flow control should not be
* enabled per IEEE 802.3 spec.
*/
@ -1124,9 +1163,10 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
}
if (duplex == HALF_DUPLEX)
mac->fc = e1000_fc_none;
hw->fc.type = e1000_fc_none;
/* Now we call a subroutine to actually force the MAC
/*
* Now we call a subroutine to actually force the MAC
* controller to use the correct flow control settings.
*/
ret_val = e1000e_force_mac_fc(hw);
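The PAUSE/ASM_DIR truth table spelled out in the comments above boils down to a short resolution step. The sketch below is illustrative only (resolve_fc_type is an invented name, not a driver function) and assumes the caller has already read the local advertisement word and the link-partner ability word:

static int resolve_fc_type(u16 adv, u16 lpa, int requested)
{
        /* Symmetric PAUSE on both ends: honour an Rx-only request,
         * otherwise run full flow control. */
        if ((adv & NWAY_AR_PAUSE) && (lpa & NWAY_LPAR_PAUSE))
                return (requested == e1000_fc_full) ?
                        e1000_fc_full : e1000_fc_rx_pause;

        /* We advertised asymmetric only, partner advertised both:
         * we may send PAUSE frames but will not act on them. */
        if (!(adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
            (lpa & NWAY_LPAR_PAUSE) && (lpa & NWAY_LPAR_ASM_DIR))
                return e1000_fc_tx_pause;

        /* We advertised both, partner advertised asymmetric only:
         * the partner sends PAUSE frames, we honour them. */
        if ((adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
            !(lpa & NWAY_LPAR_PAUSE) && (lpa & NWAY_LPAR_ASM_DIR))
                return e1000_fc_rx_pause;

        /* Per IEEE 802.3, anything else disables flow control. */
        return e1000_fc_none;
}

As the hunk above shows, the driver additionally drops back to e1000_fc_none when the link resolved to half duplex.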
@ -1393,13 +1433,15 @@ s32 e1000e_blink_led(struct e1000_hw *hw)
u32 ledctl_blink = 0;
u32 i;
if (hw->media_type == e1000_media_type_fiber) {
if (hw->phy.media_type == e1000_media_type_fiber) {
/* always blink LED0 for PCI-E fiber */
ledctl_blink = E1000_LEDCTL_LED0_BLINK |
(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
} else {
/* set the blink bit for each LED that's "on" (0x0E)
* in ledctl_mode2 */
/*
* set the blink bit for each LED that's "on" (0x0E)
* in ledctl_mode2
*/
ledctl_blink = hw->mac.ledctl_mode2;
for (i = 0; i < 4; i++)
if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
@ -1423,7 +1465,7 @@ s32 e1000e_led_on_generic(struct e1000_hw *hw)
{
u32 ctrl;
switch (hw->media_type) {
switch (hw->phy.media_type) {
case e1000_media_type_fiber:
ctrl = er32(CTRL);
ctrl &= ~E1000_CTRL_SWDPIN0;
@ -1450,7 +1492,7 @@ s32 e1000e_led_off_generic(struct e1000_hw *hw)
{
u32 ctrl;
switch (hw->media_type) {
switch (hw->phy.media_type) {
case e1000_media_type_fiber:
ctrl = er32(CTRL);
ctrl |= E1000_CTRL_SWDPIN0;
@ -1562,8 +1604,7 @@ void e1000e_update_adaptive(struct e1000_hw *hw)
else
mac->current_ifs_val +=
mac->ifs_step_size;
ew32(AIT,
mac->current_ifs_val);
ew32(AIT, mac->current_ifs_val);
}
}
} else {
@ -1826,10 +1867,12 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
udelay(1);
timeout = NVM_MAX_RETRY_SPI;
/* Read "Status Register" repeatedly until the LSB is cleared.
/*
* Read "Status Register" repeatedly until the LSB is cleared.
* The EEPROM will signal that the command has been completed
* by clearing bit 0 of the internal status register. If it's
* not cleared within 'timeout', then error out. */
* not cleared within 'timeout', then error out.
*/
while (timeout) {
e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
hw->nvm.opcode_bits);
@ -1851,62 +1894,6 @@ static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
return 0;
}
/**
* e1000e_read_nvm_spi - Reads EEPROM using SPI
* @hw: pointer to the HW structure
* @offset: offset of word in the EEPROM to read
* @words: number of words to read
* @data: word read from the EEPROM
*
* Reads a 16 bit word from the EEPROM.
**/
s32 e1000e_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
{
struct e1000_nvm_info *nvm = &hw->nvm;
u32 i = 0;
s32 ret_val;
u16 word_in;
u8 read_opcode = NVM_READ_OPCODE_SPI;
/* A check for invalid values: offset too large, too many words,
* and not enough words. */
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
hw_dbg(hw, "nvm parameter(s) out of bounds\n");
return -E1000_ERR_NVM;
}
ret_val = nvm->ops.acquire_nvm(hw);
if (ret_val)
return ret_val;
ret_val = e1000_ready_nvm_eeprom(hw);
if (ret_val) {
nvm->ops.release_nvm(hw);
return ret_val;
}
e1000_standby_nvm(hw);
if ((nvm->address_bits == 8) && (offset >= 128))
read_opcode |= NVM_A8_OPCODE_SPI;
/* Send the READ command (opcode + addr) */
e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
/* Read the data. SPI NVMs increment the address with each byte
* read and will roll over if reading beyond the end. This allows
* us to read the whole NVM from any offset */
for (i = 0; i < words; i++) {
word_in = e1000_shift_in_eec_bits(hw, 16);
data[i] = (word_in >> 8) | (word_in << 8);
}
nvm->ops.release_nvm(hw);
return 0;
}
/**
* e1000e_read_nvm_eerd - Reads EEPROM using EERD register
* @hw: pointer to the HW structure
@ -1922,8 +1909,10 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
u32 i, eerd = 0;
s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* and not enough words. */
/*
* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
hw_dbg(hw, "nvm parameter(s) out of bounds\n");
@ -1939,8 +1928,7 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
if (ret_val)
break;
data[i] = (er32(EERD) >>
E1000_NVM_RW_REG_DATA);
data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
}
return ret_val;
@ -1964,8 +1952,10 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
s32 ret_val;
u16 widx = 0;
/* A check for invalid values: offset too large, too many words,
* and not enough words. */
/*
* A check for invalid values: offset too large, too many words,
* and not enough words.
*/
if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
(words == 0)) {
hw_dbg(hw, "nvm parameter(s) out of bounds\n");
@ -1995,8 +1985,10 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
e1000_standby_nvm(hw);
/* Some SPI eeproms use the 8th address bit embedded in the
* opcode */
/*
* Some SPI eeproms use the 8th address bit embedded in the
* opcode
*/
if ((nvm->address_bits == 8) && (offset >= 128))
write_opcode |= NVM_A8_OPCODE_SPI;
@ -2041,9 +2033,9 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
/* Check for an alternate MAC address. An alternate MAC
* address can be setup by pre-boot software and must be
* treated like a permanent address and must override the
* actual permanent MAC address. */
* actual permanent MAC address.*/
ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
&mac_addr_offset);
&mac_addr_offset);
if (ret_val) {
hw_dbg(hw, "NVM Read Error\n");
return ret_val;
@ -2056,7 +2048,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
mac_addr_offset += ETH_ALEN/sizeof(u16);
/* make sure we have a valid mac address here
* before using it */
* before using it */
ret_val = e1000_read_nvm(hw, mac_addr_offset, 1,
&nvm_data);
if (ret_val) {
@ -2068,7 +2060,7 @@ s32 e1000e_read_mac_addr(struct e1000_hw *hw)
}
if (mac_addr_offset)
hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
hw->dev_spec.e82571.alt_mac_addr_is_present = 1;
}
for (i = 0; i < ETH_ALEN; i += 2) {
@ -2244,7 +2236,7 @@ bool e1000e_check_mng_mode(struct e1000_hw *hw)
}
/**
* e1000e_enable_tx_pkt_filtering - Enable packet filtering on TX
* e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
* @hw: pointer to the HW structure
*
* Enables packet filtering on transmit packets if manageability is enabled
@ -2264,7 +2256,8 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
return 0;
}
/* If we can't read from the host interface for whatever
/*
* If we can't read from the host interface for whatever
* reason, disable filtering.
*/
ret_val = e1000_mng_enable_host_if(hw);
@ -2282,7 +2275,8 @@ bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
hdr->checksum = 0;
csum = e1000_calculate_checksum((u8 *)hdr,
E1000_MNG_DHCP_COOKIE_LENGTH);
/* If either the checksums or signature don't match, then
/*
* If either the checksums or signature don't match, then
* the cookie area isn't considered valid, in which case we
* take the safe route of assuming Tx filtering is enabled.
*/
@ -2374,8 +2368,10 @@ static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
/* Calculate length in DWORDs */
length >>= 2;
/* The device driver writes the relevant command block into the
* ram area. */
/*
* The device driver writes the relevant command block into the
* ram area.
*/
for (i = 0; i < length; i++) {
for (j = 0; j < sizeof(u32); j++) {
*(tmp + j) = *bufptr++;
@ -2481,7 +2477,7 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
return ret_val;
}
s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num)
s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
{
s32 ret_val;
u16 nvm_data;
@ -2491,14 +2487,14 @@ s32 e1000e_read_part_num(struct e1000_hw *hw, u32 *part_num)
hw_dbg(hw, "NVM Read Error\n");
return ret_val;
}
*part_num = (u32)(nvm_data << 16);
*pba_num = (u32)(nvm_data << 16);
ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
if (ret_val) {
hw_dbg(hw, "NVM Read Error\n");
return ret_val;
}
*part_num |= nvm_data;
*pba_num |= nvm_data;
return 0;
}
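The part_num -> pba_num rename is purely cosmetic; callers still read the same two NVM words. A minimal, illustrative caller (not taken from the driver) could print the board identifier like this:

        u32 pba_num;

        if (!e1000e_read_pba_num(hw, &pba_num))
                printk(KERN_INFO "e1000e: PBA No: %06x-%03x\n",
                       pba_num >> 8, pba_num & 0xff);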

Diff for this file not shown because of its size.

View file

@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2007 Intel Corporation.
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@ -30,7 +30,8 @@
#include "e1000.h"
/* This is the only thing that needs to be changed to adjust the
/*
* This is the only thing that needs to be changed to adjust the
* maximum number of ports that the driver can manage.
*/
@ -46,7 +47,8 @@ module_param(copybreak, uint, 0644);
MODULE_PARM_DESC(copybreak,
"Maximum size of packet that is copied to a new buffer on receive");
/* All parameters are treated the same, as an integer array of values.
/*
* All parameters are treated the same, as an integer array of values.
* This macro just reduces the need to repeat the same declaration code
* over and over (plus this helps to avoid typo bugs).
*/
@ -60,8 +62,9 @@ MODULE_PARM_DESC(copybreak,
MODULE_PARM_DESC(X, desc);
/* Transmit Interrupt Delay in units of 1.024 microseconds
* Tx interrupt delay needs to typically be set to something non zero
/*
* Transmit Interrupt Delay in units of 1.024 microseconds
* Tx interrupt delay needs to typically be set to something non zero
*
* Valid Range: 0-65535
*/
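For context, E1000_PARAM(X, desc) is the helper these hunks keep touching; it expands to roughly the following (a sketch of the macro in param.c, reduced to its essentials rather than quoted from this diff):

#define E1000_PARAM(X, desc)                                    \
        static int __devinitdata X[E1000_MAX_NIC+1]             \
                = E1000_PARAM_INIT;                              \
        static unsigned int num_##X;                             \
        module_param_array_named(X, X, int, &num_##X, 0);       \
        MODULE_PARM_DESC(X, desc);

so each knob below (TxIntDelay, RxIntDelay, InterruptThrottleRate, ...) becomes a per-port module parameter array.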
@ -70,7 +73,8 @@ E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
#define MAX_TXDELAY 0xFFFF
#define MIN_TXDELAY 0
/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
/*
* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
@ -79,8 +83,9 @@ E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
#define MAX_TXABSDELAY 0xFFFF
#define MIN_TXABSDELAY 0
/* Receive Interrupt Delay in units of 1.024 microseconds
* hardware will likely hang if you set this to anything but zero.
/*
* Receive Interrupt Delay in units of 1.024 microseconds
* hardware will likely hang if you set this to anything but zero.
*
* Valid Range: 0-65535
*/
@ -89,7 +94,8 @@ E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
#define MAX_RXDELAY 0xFFFF
#define MIN_RXDELAY 0
/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
/*
* Receive Absolute Interrupt Delay in units of 1.024 microseconds
*
* Valid Range: 0-65535
*/
@ -98,7 +104,8 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
#define MAX_RXABSDELAY 0xFFFF
#define MIN_RXABSDELAY 0
/* Interrupt Throttle Rate (interrupts/sec)
/*
* Interrupt Throttle Rate (interrupts/sec)
*
* Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
*/
@ -107,7 +114,8 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
#define MAX_ITR 100000
#define MIN_ITR 100
/* Enable Smart Power Down of the PHY
/*
* Enable Smart Power Down of the PHY
*
* Valid Range: 0, 1
*
@ -115,7 +123,8 @@ E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
*/
E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
/* Enable Kumeran Lock Loss workaround
/*
* Enable Kumeran Lock Loss workaround
*
* Valid Range: 0, 1
*

View file

@ -1,7 +1,7 @@
/*******************************************************************************
Intel PRO/1000 Linux driver
Copyright(c) 1999 - 2007 Intel Corporation.
Copyright(c) 1999 - 2008 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@ -134,7 +134,8 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
return -E1000_ERR_PARAM;
}
/* Set up Op-code, Phy Address, and register offset in the MDI
/*
* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
@ -144,7 +145,11 @@ static s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
ew32(MDIC, mdic);
/* Poll the ready bit to see if the MDI read completed */
/*
* Poll the ready bit to see if the MDI read completed
* Increasing the time out as testing showed failures with
* the lower time out
*/
for (i = 0; i < 64; i++) {
udelay(50);
mdic = er32(MDIC);
@ -182,7 +187,8 @@ static s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
return -E1000_ERR_PARAM;
}
/* Set up Op-code, Phy Address, and register offset in the MDI
/*
* Set up Op-code, Phy Address, and register offset in the MDI
* Control register. The MAC will take care of interfacing with the
* PHY to retrieve the desired data.
*/
@ -409,14 +415,15 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
s32 ret_val;
u16 phy_data;
/* Enable CRS on TX. This must be set for half-duplex operation. */
/* Enable CRS on Tx. This must be set for half-duplex operation. */
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
if (ret_val)
return ret_val;
phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
/* Options:
/*
* Options:
* MDI/MDI-X = 0 (default)
* 0 - Auto for all speeds
* 1 - MDI mode
@ -441,7 +448,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
break;
}
/* Options:
/*
* Options:
* disable_polarity_correction = 0 (default)
* Automatic Correction for Reversed Cable Polarity
* 0 - Disabled
@ -456,7 +464,8 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
return ret_val;
if (phy->revision < 4) {
/* Force TX_CLK in the Extended PHY Specific Control Register
/*
* Force TX_CLK in the Extended PHY Specific Control Register
* to 25MHz clock.
*/
ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
@ -543,19 +552,21 @@ s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
/* set auto-master slave resolution settings */
if (hw->mac.autoneg) {
/* when autonegotiation advertisement is only 1000Mbps then we
/*
* when autonegotiation advertisement is only 1000Mbps then we
* should disable SmartSpeed and enable Auto MasterSlave
* resolution as hardware default. */
* resolution as hardware default.
*/
if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
/* Disable SmartSpeed */
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
&data);
if (ret_val)
return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
data);
if (ret_val)
return ret_val;
@ -630,14 +641,16 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
return ret_val;
}
/* Need to parse both autoneg_advertised and fc and set up
/*
* Need to parse both autoneg_advertised and fc and set up
* the appropriate PHY registers. First we will parse for
* autoneg_advertised software override. Since we can advertise
* a plethora of combinations, we need to check each bit
* individually.
*/
/* First we clear all the 10/100 mb speed bits in the Auto-Neg
/*
* First we clear all the 10/100 mb speed bits in the Auto-Neg
* Advertisement Register (Address 4) and the 1000 mb speed bits in
* the 1000Base-T Control Register (Address 9).
*/
@ -683,7 +696,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
}
/* Check for a software override of the flow control settings, and
/*
* Check for a software override of the flow control settings, and
* setup the PHY advertisement registers accordingly. If
* auto-negotiation is enabled, then software will have to set the
* "PAUSE" bits to the correct value in the Auto-Negotiation
@ -696,38 +710,42 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* but not send pause frames).
* 2: Tx flow control is enabled (we can send pause frames
* but we do not support receiving pause frames).
* 3: Both Rx and TX flow control (symmetric) are enabled.
* 3: Both Rx and Tx flow control (symmetric) are enabled.
* other: No software override. The flow control configuration
* in the EEPROM is used.
*/
switch (hw->mac.fc) {
switch (hw->fc.type) {
case e1000_fc_none:
/* Flow control (RX & TX) is completely disabled by a
/*
* Flow control (Rx & Tx) is completely disabled by a
* software over-ride.
*/
mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case e1000_fc_rx_pause:
/* RX Flow control is enabled, and TX Flow control is
/*
* Rx Flow control is enabled, and Tx Flow control is
* disabled, by a software over-ride.
*/
/* Since there really isn't a way to advertise that we are
* capable of RX Pause ONLY, we will advertise that we
* support both symmetric and asymmetric RX PAUSE. Later
*
* Since there really isn't a way to advertise that we are
* capable of Rx Pause ONLY, we will advertise that we
* support both symmetric and asymmetric Rx PAUSE. Later
* (in e1000e_config_fc_after_link_up) we will disable the
* hw's ability to send PAUSE frames.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
break;
case e1000_fc_tx_pause:
/* TX Flow control is enabled, and RX Flow control is
/*
* Tx Flow control is enabled, and Rx Flow control is
* disabled, by a software over-ride.
*/
mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
break;
case e1000_fc_full:
/* Flow control (both RX and TX) is enabled by a software
/*
* Flow control (both Rx and Tx) is enabled by a software
* over-ride.
*/
mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
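The whole switch reduces to two advertisement bits; a hedged summary as code (fc_adv_bits is an invented name, the constants are the driver's own):

static u16 fc_adv_bits(u32 fc_type)
{
        switch (fc_type) {
        case e1000_fc_rx_pause: /* Rx-only cannot be advertised, so */
        case e1000_fc_full:     /* both advertise PAUSE and ASM_DIR */
                return NWAY_AR_ASM_DIR | NWAY_AR_PAUSE;
        case e1000_fc_tx_pause:
                return NWAY_AR_ASM_DIR;
        case e1000_fc_none:
        default:
                return 0;
        }
}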
@ -758,7 +776,7 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
* Performs initial bounds checking on autoneg advertisement parameter, then
* configure to advertise the full capability. Setup the PHY to autoneg
* and restart the negotiation process between the link partner. If
* wait_for_link, then wait for autoneg to complete before exiting.
* autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
**/
static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
{
@ -766,12 +784,14 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
s32 ret_val;
u16 phy_ctrl;
/* Perform some bounds checking on the autoneg advertisement
/*
* Perform some bounds checking on the autoneg advertisement
* parameter.
*/
phy->autoneg_advertised &= phy->autoneg_mask;
/* If autoneg_advertised is zero, we assume it was not defaulted
/*
* If autoneg_advertised is zero, we assume it was not defaulted
* by the calling code so we set to advertise full capability.
*/
if (phy->autoneg_advertised == 0)
@ -785,7 +805,8 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
}
hw_dbg(hw, "Restarting Auto-Neg\n");
/* Restart auto-negotiation by setting the Auto Neg Enable bit and
/*
* Restart auto-negotiation by setting the Auto Neg Enable bit and
* the Auto Neg Restart bit in the PHY control register.
*/
ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
@ -797,10 +818,11 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* Does the user want to wait for Auto-Neg to complete here, or
/*
* Does the user want to wait for Auto-Neg to complete here, or
* check at a later time (for example, callback routine).
*/
if (phy->wait_for_link) {
if (phy->autoneg_wait_to_complete) {
ret_val = e1000_wait_autoneg(hw);
if (ret_val) {
hw_dbg(hw, "Error while waiting for "
@ -829,14 +851,18 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
bool link;
if (hw->mac.autoneg) {
/* Setup autoneg and flow control advertisement and perform
* autonegotiation. */
/*
* Setup autoneg and flow control advertisement and perform
* autonegotiation.
*/
ret_val = e1000_copper_link_autoneg(hw);
if (ret_val)
return ret_val;
} else {
/* PHY will be set to 10H, 10F, 100H or 100F
* depending on user settings. */
/*
* PHY will be set to 10H, 10F, 100H or 100F
* depending on user settings.
*/
hw_dbg(hw, "Forcing Speed and Duplex\n");
ret_val = e1000_phy_force_speed_duplex(hw);
if (ret_val) {
@ -845,7 +871,8 @@ s32 e1000e_setup_copper_link(struct e1000_hw *hw)
}
}
/* Check link status. Wait up to 100 microseconds for link to become
/*
* Check link status. Wait up to 100 microseconds for link to become
* valid.
*/
ret_val = e1000e_phy_has_link_generic(hw,
@ -891,7 +918,8 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* Clear Auto-Crossover to force MDI manually. IGP requires MDI
/*
* Clear Auto-Crossover to force MDI manually. IGP requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
@ -909,7 +937,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
udelay(1);
if (phy->wait_for_link) {
if (phy->autoneg_wait_to_complete) {
hw_dbg(hw, "Waiting for forced speed/duplex link on IGP phy.\n");
ret_val = e1000e_phy_has_link_generic(hw,
@ -941,7 +969,7 @@ s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
* Calls the PHY setup function to force speed and duplex. Clears the
* auto-crossover to force MDI manually. Resets the PHY to commit the
* changes. If time expires while waiting for link up, we reset the DSP.
* After reset, TX_CLK and CRS on TX must be set. Return successful upon
* After reset, TX_CLK and CRS on Tx must be set. Return successful upon
* successful completion, else return corresponding error code.
**/
s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
@ -951,7 +979,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
u16 phy_data;
bool link;
/* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
/*
* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
* forced whenever speed and duplex are forced.
*/
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@ -980,7 +1009,7 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
udelay(1);
if (phy->wait_for_link) {
if (phy->autoneg_wait_to_complete) {
hw_dbg(hw, "Waiting for forced speed/duplex link on M88 phy.\n");
ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
@ -989,10 +1018,12 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
return ret_val;
if (!link) {
/* We didn't get link.
/*
* We didn't get link.
* Reset the DSP and cross our fingers.
*/
ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT, 0x001d);
ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
0x001d);
if (ret_val)
return ret_val;
ret_val = e1000e_phy_reset_dsp(hw);
@ -1011,7 +1042,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* Resetting the phy means we need to re-force TX_CLK in the
/*
* Resetting the phy means we need to re-force TX_CLK in the
* Extended PHY Specific Control Register to 25MHz clock from
* the reset value of 2.5MHz.
*/
@ -1020,7 +1052,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* In addition, we must re-enable CRS on Tx for both half and full
/*
* In addition, we must re-enable CRS on Tx for both half and full
* duplex.
*/
ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
@ -1051,7 +1084,7 @@ void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
u32 ctrl;
/* Turn off flow control when forcing speed/duplex */
mac->fc = e1000_fc_none;
hw->fc.type = e1000_fc_none;
/* Force speed/duplex on the mac */
ctrl = er32(CTRL);
@ -1124,30 +1157,32 @@ s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
data);
if (ret_val)
return ret_val;
/* LPLU and SmartSpeed are mutually exclusive. LPLU is used
/*
* LPLU and SmartSpeed are mutually exclusive. LPLU is used
* during Dx states where the power conservation is most
* important. During driver activity we should enable
* SmartSpeed, so performance is maintained. */
* SmartSpeed, so performance is maintained.
*/
if (phy->smart_speed == e1000_smart_speed_on) {
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
&data);
if (ret_val)
return ret_val;
data |= IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
data);
if (ret_val)
return ret_val;
} else if (phy->smart_speed == e1000_smart_speed_off) {
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
&data);
&data);
if (ret_val)
return ret_val;
data &= ~IGP01E1000_PSCFR_SMART_SPEED;
ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
data);
data);
if (ret_val)
return ret_val;
}
@ -1249,8 +1284,10 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
s32 ret_val;
u16 data, offset, mask;
/* Polarity is determined based on the speed of
* our connection. */
/*
* Polarity is determined based on the speed of
* our connection.
*/
ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
if (ret_val)
return ret_val;
@ -1260,7 +1297,8 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
offset = IGP01E1000_PHY_PCS_INIT_REG;
mask = IGP01E1000_PHY_POLARITY_MASK;
} else {
/* This really only applies to 10Mbps since
/*
* This really only applies to 10Mbps since
* there is no polarity for 100Mbps (always 0).
*/
offset = IGP01E1000_PHY_PORT_STATUS;
@ -1278,7 +1316,7 @@ static s32 e1000_check_polarity_igp(struct e1000_hw *hw)
}
/**
* e1000_wait_autoneg - Wait for auto-neg compeletion
* e1000_wait_autoneg - Wait for auto-neg completion
* @hw: pointer to the HW structure
*
* Waits for auto-negotiation to complete or for the auto-negotiation time
@ -1302,7 +1340,8 @@ static s32 e1000_wait_autoneg(struct e1000_hw *hw)
msleep(100);
}
/* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
/*
* PHY_AUTO_NEG_TIME expiration doesn't guarantee auto-negotiation
* has completed.
*/
return ret_val;
@ -1324,7 +1363,8 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
u16 i, phy_status;
for (i = 0; i < iterations; i++) {
/* Some PHYs require the PHY_STATUS register to be read
/*
* Some PHYs require the PHY_STATUS register to be read
* twice due to the link bit being sticky. No harm doing
* it across the board.
*/
@ -1412,10 +1452,12 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
if (ret_val)
return ret_val;
/* Getting bits 15:9, which represent the combination of
/*
* Getting bits 15:9, which represent the combination of
* course and fine gain values. The result is a number
* that can be put into the lookup table to obtain the
* approximate cable length. */
* approximate cable length.
*/
cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
IGP02E1000_AGC_LENGTH_MASK;
@ -1466,7 +1508,7 @@ s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
u16 phy_data;
bool link;
if (hw->media_type != e1000_media_type_copper) {
if (hw->phy.media_type != e1000_media_type_copper) {
hw_dbg(hw, "Phy info is only valid for copper media\n");
return -E1000_ERR_CONFIG;
}

View file

@ -422,7 +422,7 @@ struct ehea_fw_handle_entry {
struct ehea_fw_handle_array {
struct ehea_fw_handle_entry *arr;
int num_entries;
struct semaphore lock;
struct mutex lock;
};
struct ehea_bcmc_reg_entry {
@ -435,7 +435,7 @@ struct ehea_bcmc_reg_entry {
struct ehea_bcmc_reg_array {
struct ehea_bcmc_reg_entry *arr;
int num_entries;
struct semaphore lock;
struct mutex lock;
};
#define EHEA_PORT_UP 1
@ -453,7 +453,7 @@ struct ehea_port {
struct vlan_group *vgrp;
struct ehea_eq *qp_eq;
struct work_struct reset_task;
struct semaphore port_lock;
struct mutex port_lock;
char int_aff_name[EHEA_IRQ_NAME_SIZE];
int allmulti; /* Indicates IFF_ALLMULTI state */
int promisc; /* Indicates IFF_PROMISC state */
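All three locks in this header move from counting semaphores used as binary locks to real mutexes. The mechanical pattern applied throughout ehea_main.c below is simply:

        /* before: semaphore pressed into service as a binary lock */
        struct semaphore port_lock;
        sema_init(&port->port_lock, 1);
        down(&port->port_lock);
        /* ... critical section ... */
        up(&port->port_lock);

        /* after: a proper sleeping mutex (with lockdep coverage) */
        struct mutex port_lock;
        mutex_init(&port->port_lock);
        mutex_lock(&port->port_lock);
        /* ... critical section ... */
        mutex_unlock(&port->port_lock);

plus the matching #include <linux/mutex.h> and, for the file-scope lock, DEFINE_MUTEX(dlpar_mem_lock).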

View file

@ -36,6 +36,7 @@
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <asm/kexec.h>
#include <linux/mutex.h>
#include <net/ip.h>
@ -99,7 +100,7 @@ static int port_name_cnt;
static LIST_HEAD(adapter_list);
u64 ehea_driver_flags;
struct work_struct ehea_rereg_mr_task;
struct semaphore dlpar_mem_lock;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;
@ -1761,7 +1762,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);
down(&ehea_bcmc_regs.lock);
mutex_lock(&ehea_bcmc_regs.lock);
/* Deregister old MAC in pHYP */
ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
@ -1779,7 +1780,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
out_upregs:
ehea_update_bcmc_registrations();
up(&ehea_bcmc_regs.lock);
mutex_unlock(&ehea_bcmc_regs.lock);
out_free:
kfree(cb0);
out:
@ -1941,7 +1942,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
}
ehea_promiscuous(dev, 0);
down(&ehea_bcmc_regs.lock);
mutex_lock(&ehea_bcmc_regs.lock);
if (dev->flags & IFF_ALLMULTI) {
ehea_allmulti(dev, 1);
@ -1972,7 +1973,7 @@ static void ehea_set_multicast_list(struct net_device *dev)
}
out:
ehea_update_bcmc_registrations();
up(&ehea_bcmc_regs.lock);
mutex_unlock(&ehea_bcmc_regs.lock);
return;
}
@ -2455,7 +2456,7 @@ static int ehea_up(struct net_device *dev)
if (port->state == EHEA_PORT_UP)
return 0;
down(&ehea_fw_handles.lock);
mutex_lock(&ehea_fw_handles.lock);
ret = ehea_port_res_setup(port, port->num_def_qps,
port->num_add_tx_qps);
@ -2493,7 +2494,7 @@ static int ehea_up(struct net_device *dev)
}
}
down(&ehea_bcmc_regs.lock);
mutex_lock(&ehea_bcmc_regs.lock);
ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
if (ret) {
@ -2516,10 +2517,10 @@ out:
ehea_info("Failed starting %s. ret=%i", dev->name, ret);
ehea_update_bcmc_registrations();
up(&ehea_bcmc_regs.lock);
mutex_unlock(&ehea_bcmc_regs.lock);
ehea_update_firmware_handles();
up(&ehea_fw_handles.lock);
mutex_unlock(&ehea_fw_handles.lock);
return ret;
}
@ -2545,7 +2546,7 @@ static int ehea_open(struct net_device *dev)
int ret;
struct ehea_port *port = netdev_priv(dev);
down(&port->port_lock);
mutex_lock(&port->port_lock);
if (netif_msg_ifup(port))
ehea_info("enabling port %s", dev->name);
@ -2556,7 +2557,7 @@ static int ehea_open(struct net_device *dev)
netif_start_queue(dev);
}
up(&port->port_lock);
mutex_unlock(&port->port_lock);
return ret;
}
@ -2569,18 +2570,18 @@ static int ehea_down(struct net_device *dev)
if (port->state == EHEA_PORT_DOWN)
return 0;
down(&ehea_bcmc_regs.lock);
mutex_lock(&ehea_fw_handles.lock);
mutex_lock(&ehea_bcmc_regs.lock);
ehea_drop_multicast_list(dev);
ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
ehea_free_interrupts(dev);
down(&ehea_fw_handles.lock);
port->state = EHEA_PORT_DOWN;
ehea_update_bcmc_registrations();
up(&ehea_bcmc_regs.lock);
mutex_unlock(&ehea_bcmc_regs.lock);
ret = ehea_clean_all_portres(port);
if (ret)
@ -2588,7 +2589,7 @@ static int ehea_down(struct net_device *dev)
dev->name, ret);
ehea_update_firmware_handles();
up(&ehea_fw_handles.lock);
mutex_unlock(&ehea_fw_handles.lock);
return ret;
}
@ -2602,11 +2603,11 @@ static int ehea_stop(struct net_device *dev)
ehea_info("disabling port %s", dev->name);
flush_scheduled_work();
down(&port->port_lock);
mutex_lock(&port->port_lock);
netif_stop_queue(dev);
port_napi_disable(port);
ret = ehea_down(dev);
up(&port->port_lock);
mutex_unlock(&port->port_lock);
return ret;
}
@ -2820,7 +2821,7 @@ static void ehea_reset_port(struct work_struct *work)
struct net_device *dev = port->netdev;
port->resets++;
down(&port->port_lock);
mutex_lock(&port->port_lock);
netif_stop_queue(dev);
port_napi_disable(port);
@ -2840,7 +2841,7 @@ static void ehea_reset_port(struct work_struct *work)
netif_wake_queue(dev);
out:
up(&port->port_lock);
mutex_unlock(&port->port_lock);
return;
}
@ -2849,7 +2850,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
int ret, i;
struct ehea_adapter *adapter;
down(&dlpar_mem_lock);
mutex_lock(&dlpar_mem_lock);
ehea_info("LPAR memory enlarged - re-initializing driver");
list_for_each_entry(adapter, &adapter_list, list)
@ -2857,22 +2858,24 @@ static void ehea_rereg_mrs(struct work_struct *work)
/* Shutdown all ports */
for (i = 0; i < EHEA_MAX_PORTS; i++) {
struct ehea_port *port = adapter->port[i];
struct net_device *dev;
if (port) {
struct net_device *dev = port->netdev;
if (!port)
continue;
if (dev->flags & IFF_UP) {
down(&port->port_lock);
netif_stop_queue(dev);
ehea_flush_sq(port);
ret = ehea_stop_qps(dev);
if (ret) {
up(&port->port_lock);
goto out;
}
port_napi_disable(port);
up(&port->port_lock);
dev = port->netdev;
if (dev->flags & IFF_UP) {
mutex_lock(&port->port_lock);
netif_stop_queue(dev);
ehea_flush_sq(port);
ret = ehea_stop_qps(dev);
if (ret) {
mutex_unlock(&port->port_lock);
goto out;
}
port_napi_disable(port);
mutex_unlock(&port->port_lock);
}
}
@ -2912,17 +2915,17 @@ static void ehea_rereg_mrs(struct work_struct *work)
struct net_device *dev = port->netdev;
if (dev->flags & IFF_UP) {
down(&port->port_lock);
mutex_lock(&port->port_lock);
port_napi_enable(port);
ret = ehea_restart_qps(dev);
if (!ret)
netif_wake_queue(dev);
up(&port->port_lock);
mutex_unlock(&port->port_lock);
}
}
}
}
up(&dlpar_mem_lock);
mutex_unlock(&dlpar_mem_lock);
ehea_info("re-initializing driver complete");
out:
return;
@ -3083,7 +3086,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
port = netdev_priv(dev);
sema_init(&port->port_lock, 1);
mutex_init(&port->port_lock);
port->state = EHEA_PORT_DOWN;
port->sig_comp_iv = sq_entries / 10;
@ -3362,7 +3365,7 @@ static int __devinit ehea_probe_adapter(struct of_device *dev,
ehea_error("Invalid ibmebus device probed");
return -EINVAL;
}
down(&ehea_fw_handles.lock);
mutex_lock(&ehea_fw_handles.lock);
adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
@ -3446,7 +3449,7 @@ out_free_ad:
out:
ehea_update_firmware_handles();
up(&ehea_fw_handles.lock);
mutex_unlock(&ehea_fw_handles.lock);
return ret;
}
@ -3465,7 +3468,7 @@ static int __devexit ehea_remove(struct of_device *dev)
flush_scheduled_work();
down(&ehea_fw_handles.lock);
mutex_lock(&ehea_fw_handles.lock);
ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
tasklet_kill(&adapter->neq_tasklet);
@ -3476,7 +3479,7 @@ static int __devexit ehea_remove(struct of_device *dev)
kfree(adapter);
ehea_update_firmware_handles();
up(&ehea_fw_handles.lock);
mutex_unlock(&ehea_fw_handles.lock);
return 0;
}
@ -3563,9 +3566,8 @@ int __init ehea_module_init(void)
memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
sema_init(&dlpar_mem_lock, 1);
sema_init(&ehea_fw_handles.lock, 1);
sema_init(&ehea_bcmc_regs.lock, 1);
mutex_init(&ehea_fw_handles.lock);
mutex_init(&ehea_bcmc_regs.lock);
ret = check_module_parm();
if (ret)

View file

@ -198,7 +198,7 @@ static int mpc52xx_fec_init_phy(struct net_device *dev)
struct phy_device *phydev;
char phy_id[BUS_ID_SIZE];
snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT,
snprintf(phy_id, BUS_ID_SIZE, "%x:%02x",
(unsigned int)dev->base_addr, priv->phy_addr);
priv->link = PHY_DOWN;

View file

@ -124,7 +124,7 @@ static int mpc52xx_fec_mdio_probe(struct of_device *of, const struct of_device_i
goto out_free;
}
bus->id = res.start;
snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
bus->priv = priv;
bus->dev = dev;
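Both hunks follow from the phylib change that turned mii_bus->id from an int into a fixed-size string, so a PHY is now addressed as "<bus-id>:<phy-addr>". A hedged sketch of the pairing, with invented variable names (the phy_connect() prototype assumed here is the 2.6.26-era one):

        char bus_id[MII_BUS_ID_SIZE];
        char phy_id[BUS_ID_SIZE];
        int phy_addr = 1;                       /* example address */

        /* the bus itself is identified by a string ... */
        snprintf(bus_id, MII_BUS_ID_SIZE, "%x", (unsigned int)res.start);

        /* ... and a PHY on it by "<bus-id>:<addr>" */
        snprintf(phy_id, BUS_ID_SIZE, "%x:%02x",
                 (unsigned int)res.start, phy_addr);
        phydev = phy_connect(ndev, phy_id, &adjust_link, 0,
                             PHY_INTERFACE_MODE_MII);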

View file

@ -29,90 +29,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Changelog:
* 0.01: 05 Oct 2003: First release that compiles without warnings.
* 0.02: 05 Oct 2003: Fix bug for nv_drain_tx: do not try to free NULL skbs.
* Check all PCI BARs for the register window.
* udelay added to mii_rw.
* 0.03: 06 Oct 2003: Initialize dev->irq.
* 0.04: 07 Oct 2003: Initialize np->lock, reduce handled irqs, add printks.
* 0.05: 09 Oct 2003: printk removed again, irq status print tx_timeout.
* 0.06: 10 Oct 2003: MAC Address read updated, pff flag generation updated,
* irq mask updated
* 0.07: 14 Oct 2003: Further irq mask updates.
* 0.08: 20 Oct 2003: rx_desc.Length initialization added, nv_alloc_rx refill
* added into irq handler, NULL check for drain_ring.
* 0.09: 20 Oct 2003: Basic link speed irq implementation. Only handle the
* requested interrupt sources.
* 0.10: 20 Oct 2003: First cleanup for release.
* 0.11: 21 Oct 2003: hexdump for tx added, rx buffer sizes increased.
* MAC Address init fix, set_multicast cleanup.
* 0.12: 23 Oct 2003: Cleanups for release.
* 0.13: 25 Oct 2003: Limit for concurrent tx packets increased to 10.
* Set link speed correctly. start rx before starting
* tx (nv_start_rx sets the link speed).
* 0.14: 25 Oct 2003: Nic dependant irq mask.
* 0.15: 08 Nov 2003: fix smp deadlock with set_multicast_list during
* open.
* 0.16: 15 Nov 2003: include file cleanup for ppc64, rx buffer size
* increased to 1628 bytes.
* 0.17: 16 Nov 2003: undo rx buffer size increase. Substract 1 from
* the tx length.
* 0.18: 17 Nov 2003: fix oops due to late initialization of dev_stats
* 0.19: 29 Nov 2003: Handle RxNoBuf, detect & handle invalid mac
* addresses, really stop rx if already running
* in nv_start_rx, clean up a bit.
* 0.20: 07 Dec 2003: alloc fixes
* 0.21: 12 Jan 2004: additional alloc fix, nic polling fix.
* 0.22: 19 Jan 2004: reprogram timer to a sane rate, avoid lockup
* on close.
* 0.23: 26 Jan 2004: various small cleanups
* 0.24: 27 Feb 2004: make driver even less anonymous in backtraces
* 0.25: 09 Mar 2004: wol support
* 0.26: 03 Jun 2004: netdriver specific annotation, sparse-related fixes
* 0.27: 19 Jun 2004: Gigabit support, new descriptor rings,
* added CK804/MCP04 device IDs, code fixes
* for registers, link status and other minor fixes.
* 0.28: 21 Jun 2004: Big cleanup, making driver mostly endian safe
* 0.29: 31 Aug 2004: Add backup timer for link change notification.
* 0.30: 25 Sep 2004: rx checksum support for nf 250 Gb. Add rx reset
* into nv_close, otherwise reenabling for wol can
* cause DMA to kfree'd memory.
* 0.31: 14 Nov 2004: ethtool support for getting/setting link
* capabilities.
* 0.32: 16 Apr 2005: RX_ERROR4 handling added.
* 0.33: 16 May 2005: Support for MCP51 added.
* 0.34: 18 Jun 2005: Add DEV_NEED_LINKTIMER to all nForce nics.
* 0.35: 26 Jun 2005: Support for MCP55 added.
* 0.36: 28 Jun 2005: Add jumbo frame support.
* 0.37: 10 Jul 2005: Additional ethtool support, cleanup of pci id list
* 0.38: 16 Jul 2005: tx irq rewrite: Use global flags instead of
* per-packet flags.
* 0.39: 18 Jul 2005: Add 64bit descriptor support.
* 0.40: 19 Jul 2005: Add support for mac address change.
* 0.41: 30 Jul 2005: Write back original MAC in nv_close instead
* of nv_remove
* 0.42: 06 Aug 2005: Fix lack of link speed initialization
* in the second (and later) nv_open call
* 0.43: 10 Aug 2005: Add support for tx checksum.
* 0.44: 20 Aug 2005: Add support for scatter gather and segmentation.
* 0.45: 18 Sep 2005: Remove nv_stop/start_rx from every link check
* 0.46: 20 Oct 2005: Add irq optimization modes.
* 0.47: 26 Oct 2005: Add phyaddr 0 in phy scan.
* 0.48: 24 Dec 2005: Disable TSO, bugfix for pci_map_single
* 0.49: 10 Dec 2005: Fix tso for large buffers.
* 0.50: 20 Jan 2006: Add 8021pq tagging support.
* 0.51: 20 Jan 2006: Add 64bit consistent memory allocation for rings.
* 0.52: 20 Jan 2006: Add MSI/MSIX support.
* 0.53: 19 Mar 2006: Fix init from low power mode and add hw reset.
* 0.54: 21 Mar 2006: Fix spin locks for multi irqs and cleanup.
* 0.55: 22 Mar 2006: Add flow control (pause frame).
* 0.56: 22 Mar 2006: Additional ethtool config and moduleparam support.
* 0.57: 14 May 2006: Mac address set in probe/remove and order corrections.
* 0.58: 30 Oct 2006: Added support for sideband management unit.
* 0.59: 30 Oct 2006: Added support for recoverable error.
* 0.60: 20 Jan 2007: Code optimizations for rings, rx & tx data paths, and stats.
*
* Known bugs:
* We suspect that on some hardware no TX done interrupts are generated.
* This means recovery from netif_stop_queue only happens if the hw timer
@ -123,11 +39,6 @@
* DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
* superfluous timer interrupts from the nic.
*/
#ifdef CONFIG_FORCEDETH_NAPI
#define DRIVERNAPI "-NAPI"
#else
#define DRIVERNAPI
#endif
#define FORCEDETH_VERSION "0.61"
#define DRV_NAME "forcedeth"
@ -930,6 +841,13 @@ static inline u32 nv_descr_getlength_ex(struct ring_desc_ex *prd, u32 v)
return le32_to_cpu(prd->flaglen) & LEN_MASK_V2;
}
static bool nv_optimized(struct fe_priv *np)
{
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
return false;
return true;
}
static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
int delay, int delaymax, const char *msg)
{
@ -966,7 +884,7 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if (!nv_optimized(np)) {
if (rxtx_flags & NV_SETUP_RX_RING) {
writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
}
@ -989,7 +907,7 @@ static void free_rings(struct net_device *dev)
{
struct fe_priv *np = get_nvpriv(dev);
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if (!nv_optimized(np)) {
if (np->rx_ring.orig)
pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
np->rx_ring.orig, np->ring_addr);
@ -1435,6 +1353,18 @@ static void nv_stop_tx(struct net_device *dev)
base + NvRegTransmitPoll);
}
static void nv_start_rxtx(struct net_device *dev)
{
nv_start_rx(dev);
nv_start_tx(dev);
}
static void nv_stop_rxtx(struct net_device *dev)
{
nv_stop_rx(dev);
nv_stop_tx(dev);
}
static void nv_txrx_reset(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
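With nv_optimized(), nv_start_rxtx()/nv_stop_rxtx() and (further down) nv_drain_rxtx() in place, every engine restart path in the driver collapses to the same shape. A representative before/after of one call site, condensed from the hunks below:

        /* before */
        nv_stop_rx(dev);
        nv_stop_tx(dev);
        nv_txrx_reset(dev);
        nv_drain_rx(dev);
        nv_drain_tx(dev);
        /* ... reinit rings, set bufsize ... */
        nv_start_rx(dev);
        nv_start_tx(dev);

        /* after */
        nv_stop_rxtx(dev);
        nv_txrx_reset(dev);
        nv_drain_rxtx(dev);
        /* ... reinit rings, set bufsize ... */
        nv_start_rxtx(dev);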
@ -1657,7 +1587,7 @@ static void nv_do_rx_refill(unsigned long data)
} else {
disable_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector);
}
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
if (!nv_optimized(np))
retcode = nv_alloc_rx(dev);
else
retcode = nv_alloc_rx_optimized(dev);
@ -1682,8 +1612,10 @@ static void nv_init_rx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int i;
np->get_rx = np->put_rx = np->first_rx = np->rx_ring;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
if (!nv_optimized(np))
np->last_rx.orig = &np->rx_ring.orig[np->rx_ring_size-1];
else
np->last_rx.ex = &np->rx_ring.ex[np->rx_ring_size-1];
@ -1691,7 +1623,7 @@ static void nv_init_rx(struct net_device *dev)
np->last_rx_ctx = &np->rx_skb[np->rx_ring_size-1];
for (i = 0; i < np->rx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if (!nv_optimized(np)) {
np->rx_ring.orig[i].flaglen = 0;
np->rx_ring.orig[i].buf = 0;
} else {
@ -1709,8 +1641,10 @@ static void nv_init_tx(struct net_device *dev)
{
struct fe_priv *np = netdev_priv(dev);
int i;
np->get_tx = np->put_tx = np->first_tx = np->tx_ring;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
if (!nv_optimized(np))
np->last_tx.orig = &np->tx_ring.orig[np->tx_ring_size-1];
else
np->last_tx.ex = &np->tx_ring.ex[np->tx_ring_size-1];
@ -1721,7 +1655,7 @@ static void nv_init_tx(struct net_device *dev)
np->tx_end_flip = NULL;
for (i = 0; i < np->tx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if (!nv_optimized(np)) {
np->tx_ring.orig[i].flaglen = 0;
np->tx_ring.orig[i].buf = 0;
} else {
@ -1744,7 +1678,8 @@ static int nv_init_ring(struct net_device *dev)
nv_init_tx(dev);
nv_init_rx(dev);
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
if (!nv_optimized(np))
return nv_alloc_rx(dev);
else
return nv_alloc_rx_optimized(dev);
@ -1775,7 +1710,7 @@ static void nv_drain_tx(struct net_device *dev)
unsigned int i;
for (i = 0; i < np->tx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if (!nv_optimized(np)) {
np->tx_ring.orig[i].flaglen = 0;
np->tx_ring.orig[i].buf = 0;
} else {
@ -1802,7 +1737,7 @@ static void nv_drain_rx(struct net_device *dev)
int i;
for (i = 0; i < np->rx_ring_size; i++) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if (!nv_optimized(np)) {
np->rx_ring.orig[i].flaglen = 0;
np->rx_ring.orig[i].buf = 0;
} else {
@ -1823,7 +1758,7 @@ static void nv_drain_rx(struct net_device *dev)
}
}
static void drain_ring(struct net_device *dev)
static void nv_drain_rxtx(struct net_device *dev)
{
nv_drain_tx(dev);
nv_drain_rx(dev);
@ -2260,7 +2195,7 @@ static void nv_tx_timeout(struct net_device *dev)
}
printk(KERN_INFO "%s: Dumping tx ring\n", dev->name);
for (i=0;i<np->tx_ring_size;i+= 4) {
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if (!nv_optimized(np)) {
printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
i,
le32_to_cpu(np->tx_ring.orig[i].buf),
@ -2296,7 +2231,7 @@ static void nv_tx_timeout(struct net_device *dev)
nv_stop_tx(dev);
/* 2) check that the packets were not sent already: */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
if (!nv_optimized(np))
nv_tx_done(dev);
else
nv_tx_done_optimized(dev, np->tx_ring_size);
@ -2663,12 +2598,10 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
netif_tx_lock_bh(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rx(dev);
nv_stop_tx(dev);
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
/* drain rx queue */
nv_drain_rx(dev);
nv_drain_tx(dev);
nv_drain_rxtx(dev);
/* reinit driver view of the rx queue */
set_bufsize(dev);
if (nv_init_ring(dev)) {
@ -2685,8 +2618,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
pci_push(base);
/* restart rx engine */
nv_start_rx(dev);
nv_start_tx(dev);
nv_start_rxtx(dev);
spin_unlock(&np->lock);
netif_tx_unlock_bh(dev);
nv_enable_irq(dev);
@ -3393,7 +3325,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
unsigned long flags;
int pkts, retcode;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if (!nv_optimized(np)) {
pkts = nv_rx_process(dev, budget);
retcode = nv_alloc_rx(dev);
} else {
@ -3634,7 +3566,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
if (intr_test) {
handler = nv_nic_irq_test;
} else {
if (np->desc_ver == DESC_VER_3)
if (nv_optimized(np))
handler = nv_nic_irq_optimized;
else
handler = nv_nic_irq;
@ -3787,12 +3719,10 @@ static void nv_do_nic_poll(unsigned long data)
netif_tx_lock_bh(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rx(dev);
nv_stop_tx(dev);
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
/* drain rx queue */
nv_drain_rx(dev);
nv_drain_tx(dev);
nv_drain_rxtx(dev);
/* reinit driver view of the rx queue */
set_bufsize(dev);
if (nv_init_ring(dev)) {
@ -3809,8 +3739,7 @@ static void nv_do_nic_poll(unsigned long data)
pci_push(base);
/* restart rx engine */
nv_start_rx(dev);
nv_start_tx(dev);
nv_start_rxtx(dev);
spin_unlock(&np->lock);
netif_tx_unlock_bh(dev);
}
@ -3821,7 +3750,7 @@ static void nv_do_nic_poll(unsigned long data)
pci_push(base);
if (!using_multi_irqs(dev)) {
if (np->desc_ver == DESC_VER_3)
if (nv_optimized(np))
nv_nic_irq_optimized(0, dev);
else
nv_nic_irq(0, dev);
@ -3860,7 +3789,8 @@ static void nv_do_stats_poll(unsigned long data)
nv_get_hw_stats(dev);
if (!np->in_shutdown)
mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
mod_timer(&np->stats_poll,
round_jiffies(jiffies + STATS_INTERVAL));
}
static void nv_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@ -4018,8 +3948,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
netif_tx_lock_bh(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rx(dev);
nv_stop_tx(dev);
nv_stop_rxtx(dev);
spin_unlock(&np->lock);
netif_tx_unlock_bh(dev);
}
@ -4125,8 +4054,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
}
if (netif_running(dev)) {
nv_start_rx(dev);
nv_start_tx(dev);
nv_start_rxtx(dev);
nv_enable_irq(dev);
}
@ -4169,8 +4097,7 @@ static int nv_nway_reset(struct net_device *dev)
netif_tx_lock_bh(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rx(dev);
nv_stop_tx(dev);
nv_stop_rxtx(dev);
spin_unlock(&np->lock);
netif_tx_unlock_bh(dev);
printk(KERN_INFO "%s: link down.\n", dev->name);
@ -4190,8 +4117,7 @@ static int nv_nway_reset(struct net_device *dev)
}
if (netif_running(dev)) {
nv_start_rx(dev);
nv_start_tx(dev);
nv_start_rxtx(dev);
nv_enable_irq(dev);
}
ret = 0;
@ -4248,7 +4174,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
}
/* allocate new rings */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if (!nv_optimized(np)) {
rxtx_ring = pci_alloc_consistent(np->pci_dev,
sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
&ring_addr);
@ -4261,7 +4187,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
tx_skbuff = kmalloc(sizeof(struct nv_skb_map) * ring->tx_pending, GFP_KERNEL);
if (!rxtx_ring || !rx_skbuff || !tx_skbuff) {
/* fall back to old rings */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if (!nv_optimized(np)) {
if (rxtx_ring)
pci_free_consistent(np->pci_dev, sizeof(struct ring_desc) * (ring->rx_pending + ring->tx_pending),
rxtx_ring, ring_addr);
@ -4282,12 +4208,10 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
netif_tx_lock_bh(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rx(dev);
nv_stop_tx(dev);
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
/* drain queues */
nv_drain_rx(dev);
nv_drain_tx(dev);
nv_drain_rxtx(dev);
/* delete queues */
free_rings(dev);
}
@ -4295,7 +4219,8 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
/* set new values */
np->rx_ring_size = ring->rx_pending;
np->tx_ring_size = ring->tx_pending;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if (!nv_optimized(np)) {
np->rx_ring.orig = (struct ring_desc*)rxtx_ring;
np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
} else {
@ -4327,8 +4252,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
pci_push(base);
/* restart engines */
nv_start_rx(dev);
nv_start_tx(dev);
nv_start_rxtx(dev);
spin_unlock(&np->lock);
netif_tx_unlock_bh(dev);
nv_enable_irq(dev);
@ -4369,8 +4293,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
netif_tx_lock_bh(dev);
spin_lock(&np->lock);
/* stop engines */
nv_stop_rx(dev);
nv_stop_tx(dev);
nv_stop_rxtx(dev);
spin_unlock(&np->lock);
netif_tx_unlock_bh(dev);
}
@ -4411,8 +4334,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
}
if (netif_running(dev)) {
nv_start_rx(dev);
nv_start_tx(dev);
nv_start_rxtx(dev);
nv_enable_irq(dev);
}
return 0;
@ -4648,8 +4570,7 @@ static int nv_loopback_test(struct net_device *dev)
pci_push(base);
/* restart rx engine */
nv_start_rx(dev);
nv_start_tx(dev);
nv_start_rxtx(dev);
/* setup packet for tx */
pkt_len = ETH_DATA_LEN;
@ -4667,7 +4588,7 @@ static int nv_loopback_test(struct net_device *dev)
for (i = 0; i < pkt_len; i++)
pkt_data[i] = (u8)(i & 0xff);
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if (!nv_optimized(np)) {
np->tx_ring.orig[0].buf = cpu_to_le32(test_dma_addr);
np->tx_ring.orig[0].flaglen = cpu_to_le32((pkt_len-1) | np->tx_flags | tx_flags_extra);
} else {
@ -4681,7 +4602,7 @@ static int nv_loopback_test(struct net_device *dev)
msleep(500);
/* check for rx of the packet */
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if (!nv_optimized(np)) {
flags = le32_to_cpu(np->rx_ring.orig[0].flaglen);
len = nv_descr_getlength(&np->rx_ring.orig[0], np->desc_ver);
@ -4727,12 +4648,10 @@ static int nv_loopback_test(struct net_device *dev)
dev_kfree_skb_any(tx_skb);
out:
/* stop engines */
nv_stop_rx(dev);
nv_stop_tx(dev);
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
/* drain rx queue */
nv_drain_rx(dev);
nv_drain_tx(dev);
nv_drain_rxtx(dev);
if (netif_running(dev)) {
writel(misc1_flags, base + NvRegMisc1);
@ -4770,12 +4689,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
}
/* stop engines */
nv_stop_rx(dev);
nv_stop_tx(dev);
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
/* drain rx queue */
nv_drain_rx(dev);
nv_drain_tx(dev);
nv_drain_rxtx(dev);
spin_unlock_irq(&np->lock);
netif_tx_unlock_bh(dev);
}
@ -4816,8 +4733,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
pci_push(base);
/* restart rx engine */
nv_start_rx(dev);
nv_start_tx(dev);
nv_start_rxtx(dev);
netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
napi_enable(&np->napi);
@ -5046,8 +4962,7 @@ static int nv_open(struct net_device *dev)
* to init hw */
np->linkspeed = 0;
ret = nv_update_linkspeed(dev);
nv_start_rx(dev);
nv_start_tx(dev);
nv_start_rxtx(dev);
netif_start_queue(dev);
#ifdef CONFIG_FORCEDETH_NAPI
napi_enable(&np->napi);
@ -5064,13 +4979,14 @@ static int nv_open(struct net_device *dev)
/* start statistics timer */
if (np->driver_data & (DEV_HAS_STATISTICS_V1|DEV_HAS_STATISTICS_V2))
mod_timer(&np->stats_poll, jiffies + STATS_INTERVAL);
mod_timer(&np->stats_poll,
round_jiffies(jiffies + STATS_INTERVAL));
spin_unlock_irq(&np->lock);
return 0;
out_drain:
drain_ring(dev);
nv_drain_rxtx(dev);
return ret;
}
@ -5093,8 +5009,7 @@ static int nv_close(struct net_device *dev)
netif_stop_queue(dev);
spin_lock_irq(&np->lock);
nv_stop_tx(dev);
nv_stop_rx(dev);
nv_stop_rxtx(dev);
nv_txrx_reset(dev);
/* disable interrupts on the nic or we will lock up */
@ -5107,7 +5022,7 @@ static int nv_close(struct net_device *dev)
nv_free_irq(dev);
drain_ring(dev);
nv_drain_rxtx(dev);
if (np->wolenabled) {
writel(NVREG_PFF_ALWAYS|NVREG_PFF_MYADDR, base + NvRegPacketFilterFlags);
@ -5267,7 +5182,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
np->rx_ring_size = RX_RING_DEFAULT;
np->tx_ring_size = TX_RING_DEFAULT;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
if (!nv_optimized(np)) {
np->rx_ring.orig = pci_alloc_consistent(pci_dev,
sizeof(struct ring_desc) * (np->rx_ring_size + np->tx_ring_size),
&np->ring_addr);
@ -5289,7 +5204,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
dev->open = nv_open;
dev->stop = nv_close;
if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2)
if (!nv_optimized(np))
dev->hard_start_xmit = nv_start_xmit;
else
dev->hard_start_xmit = nv_start_xmit_optimized;

View file

@ -1178,7 +1178,7 @@ static int __devinit find_phy(struct device_node *np,
data = of_get_property(np, "fixed-link", NULL);
if (data) {
snprintf(fpi->bus_id, 16, PHY_ID_FMT, 0, *data);
snprintf(fpi->bus_id, 16, "%x:%02x", 0, *data);
return 0;
}
@ -1202,7 +1202,7 @@ static int __devinit find_phy(struct device_node *np,
if (!data || len != 4)
goto out_put_mdio;
snprintf(fpi->bus_id, 16, PHY_ID_FMT, res.start, *data);
snprintf(fpi->bus_id, 16, "%x:%02x", res.start, *data);
out_put_mdio:
of_node_put(mdionode);

View file

@ -130,7 +130,7 @@ static int __devinit fs_mii_bitbang_init(struct mii_bus *bus,
* we get is an int, and the odds of multiple bitbang mdio buses
* is low enough that it's not worth going too crazy.
*/
bus->id = res.start;
snprintf(bus->id, MII_BUS_ID_SIZE, "%x", res.start);
data = of_get_property(np, "fsl,mdio-pin", &len);
if (!data || len != 4)
@ -307,7 +307,7 @@ static int __devinit fs_enet_mdio_probe(struct device *dev)
return -ENOMEM;
new_bus->name = "BB MII Bus",
new_bus->id = pdev->id;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
new_bus->phy_mask = ~0x9;
pdata = (struct fs_mii_bb_platform_info *)pdev->dev.platform_data;

View file

@ -196,7 +196,7 @@ static int __devinit fs_enet_mdio_probe(struct of_device *ofdev,
if (ret)
return ret;
new_bus->id = res.start;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", res.start);
fec->fecp = ioremap(res.start, res.end - res.start + 1);
if (!fec->fecp)
@ -309,7 +309,7 @@ static int __devinit fs_enet_fec_mdio_probe(struct device *dev)
new_bus->read = &fs_enet_fec_mii_read,
new_bus->write = &fs_enet_fec_mii_write,
new_bus->reset = &fs_enet_fec_mii_reset,
new_bus->id = pdev->id;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
pdata = (struct fs_mii_fec_platform_info *)pdev->dev.platform_data;
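
The fs_enet MDIO hunks above (and the gianfar_mii hunk further down) stop assigning a raw integer to new_bus->id and instead format it into the id buffer with snprintf(), and the fixed-link case in the driver above builds the PHY bus_id as "<bus>:<addr>" text instead of going through PHY_ID_FMT. A small userspace sketch of the resulting strings; the buffer size and the 0xe0102120 register base are made-up demo values, only the format strings come from the hunks:

/* Standalone illustration of the string-valued ids.  DEMO_MII_BUS_ID_SIZE
 * and the example addresses are assumptions for the demo, not values
 * taken from the drivers. */
#include <stdio.h>

#define DEMO_MII_BUS_ID_SIZE 17		/* stand-in for MII_BUS_ID_SIZE */

int main(void)
{
	char bus_id[DEMO_MII_BUS_ID_SIZE];
	char phy_id[DEMO_MII_BUS_ID_SIZE + 4];
	unsigned int mdio_base = 0xe0102120;	/* example resource start */
	unsigned int phy_addr = 5;		/* example PHY address */

	/* new_bus->id style: the MMIO base (or platform device id) in hex */
	snprintf(bus_id, sizeof(bus_id), "%x", mdio_base);

	/* fixed-link fpi->bus_id style: "<bus>:<addr>" */
	snprintf(phy_id, sizeof(phy_id), "%x:%02x", mdio_base, phy_addr);

	printf("bus id: %s\n", bus_id);		/* prints "e0102120"    */
	printf("phy id: %s\n", phy_id);		/* prints "e0102120:05" */
	return 0;
}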

View file

@ -1185,7 +1185,7 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
int frame_size = new_mtu + ETH_HLEN;
if (priv->vlan_enable)
frame_size += VLAN_ETH_HLEN;
frame_size += VLAN_HLEN;
if (gfar_uses_fcb(priv))
frame_size += GMAC_FCB_LEN;
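
The MTU hunk above now adds only VLAN_HLEN (the 4-byte 802.1Q tag) on top of ETH_HLEN; the old VLAN_ETH_HLEN (18 bytes) counted the Ethernet header a second time. A short worked example, assuming the standard 14/4-byte header sizes and treating the 8-byte value of GMAC_FCB_LEN as an assumption for illustration:

/* Worked example of the corrected frame-size accounting.  ETH_HLEN and
 * VLAN_HLEN are the standard 14 and 4; GMAC_FCB_LEN = 8 is assumed for
 * the demo. */
#include <stdio.h>

#define DEMO_ETH_HLEN		14
#define DEMO_VLAN_HLEN		4
#define DEMO_GMAC_FCB_LEN	8

int main(void)
{
	int new_mtu = 1500;
	int frame_size = new_mtu + DEMO_ETH_HLEN;	/* 1514 */

	frame_size += DEMO_VLAN_HLEN;	/* 1518: tag only, the header is
					 * already counted above */
	frame_size += DEMO_GMAC_FCB_LEN;/* 1526 once the FCB is prepended */

	printf("frame_size = %d\n", frame_size);
	return 0;
}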
@ -1250,17 +1250,12 @@ static void gfar_timeout(struct net_device *dev)
}
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
int gfar_clean_tx_ring(struct net_device *dev)
{
struct net_device *dev = (struct net_device *) dev_id;
struct gfar_private *priv = netdev_priv(dev);
struct txbd8 *bdp;
struct gfar_private *priv = netdev_priv(dev);
int howmany = 0;
/* Clear IEVENT */
gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
/* Lock priv */
spin_lock(&priv->txlock);
bdp = priv->dirty_tx;
while ((bdp->status & TXBD_READY) == 0) {
/* If dirty_tx and cur_tx are the same, then either the */
@ -1269,7 +1264,7 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
if ((bdp == priv->cur_tx) && (netif_queue_stopped(dev) == 0))
break;
dev->stats.tx_packets++;
howmany++;
/* Deferred means some collisions occurred during transmit, */
/* but we eventually sent the packet. */
@ -1278,11 +1273,15 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
/* Free the sk buffer associated with this TxBD */
dev_kfree_skb_irq(priv->tx_skbuff[priv->skb_dirtytx]);
priv->tx_skbuff[priv->skb_dirtytx] = NULL;
priv->skb_dirtytx =
(priv->skb_dirtytx +
1) & TX_RING_MOD_MASK(priv->tx_ring_size);
/* Clean BD length for empty detection */
bdp->length = 0;
/* update bdp to point at next bd in the ring (wrapping if necessary) */
if (bdp->status & TXBD_WRAP)
bdp = priv->tx_bd_base;
@ -1297,13 +1296,32 @@ static irqreturn_t gfar_transmit(int irq, void *dev_id)
netif_wake_queue(dev);
} /* while ((bdp->status & TXBD_READY) == 0) */
dev->stats.tx_packets += howmany;
return howmany;
}
/* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
struct gfar_private *priv = netdev_priv(dev);
/* Clear IEVENT */
gfar_write(&priv->regs->ievent, IEVENT_TX_MASK);
/* Lock priv */
spin_lock(&priv->txlock);
gfar_clean_tx_ring(dev);
/* If we are coalescing the interrupts, reset the timer */
/* Otherwise, clear it */
if (priv->txcoalescing)
if (likely(priv->txcoalescing)) {
gfar_write(&priv->regs->txic, 0);
gfar_write(&priv->regs->txic,
mk_ic_value(priv->txcount, priv->txtime));
else
gfar_write(&priv->regs->txic, 0);
}
spin_unlock(&priv->txlock);
@ -1392,15 +1410,15 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
unsigned long flags;
#endif
/* Clear IEVENT, so rx interrupt isn't called again
* because of this interrupt */
gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
/* support NAPI */
#ifdef CONFIG_GFAR_NAPI
/* Clear IEVENT, so interrupts aren't called again
* because of the packets that have already arrived */
gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK);
if (netif_rx_schedule_prep(dev, &priv->napi)) {
tempval = gfar_read(&priv->regs->imask);
tempval &= IMASK_RX_DISABLED;
tempval &= IMASK_RTX_DISABLED;
gfar_write(&priv->regs->imask, tempval);
__netif_rx_schedule(dev, &priv->napi);
@ -1411,17 +1429,20 @@ irqreturn_t gfar_receive(int irq, void *dev_id)
gfar_read(&priv->regs->imask));
}
#else
/* Clear IEVENT, so rx interrupt isn't called again
* because of this interrupt */
gfar_write(&priv->regs->ievent, IEVENT_RX_MASK);
spin_lock_irqsave(&priv->rxlock, flags);
gfar_clean_rx_ring(dev, priv->rx_ring_size);
/* If we are coalescing interrupts, update the timer */
/* Otherwise, clear it */
if (priv->rxcoalescing)
if (likely(priv->rxcoalescing)) {
gfar_write(&priv->regs->rxic, 0);
gfar_write(&priv->regs->rxic,
mk_ic_value(priv->rxcount, priv->rxtime));
else
gfar_write(&priv->regs->rxic, 0);
}
spin_unlock_irqrestore(&priv->rxlock, flags);
#endif
@ -1526,9 +1547,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
rmb();
skb = priv->rx_skbuff[priv->skb_currx];
if (!(bdp->status &
(RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET
| RXBD_CRCERR | RXBD_OVERRUN | RXBD_TRUNCATED))) {
if ((bdp->status & RXBD_LAST) && !(bdp->status & RXBD_ERR)) {
/* Increment the number of packets */
dev->stats.rx_packets++;
howmany++;
@ -1582,6 +1601,13 @@ static int gfar_poll(struct napi_struct *napi, int budget)
struct gfar_private *priv = container_of(napi, struct gfar_private, napi);
struct net_device *dev = priv->dev;
int howmany;
unsigned long flags;
/* If we fail to get the lock, don't bother with the TX BDs */
if (spin_trylock_irqsave(&priv->txlock, flags)) {
gfar_clean_tx_ring(dev);
spin_unlock_irqrestore(&priv->txlock, flags);
}
howmany = gfar_clean_rx_ring(dev, budget);
@ -1595,11 +1621,11 @@ static int gfar_poll(struct napi_struct *napi, int budget)
/* If we are coalescing interrupts, update the timer */
/* Otherwise, clear it */
if (priv->rxcoalescing)
if (likely(priv->rxcoalescing)) {
gfar_write(&priv->regs->rxic, 0);
gfar_write(&priv->regs->rxic,
mk_ic_value(priv->rxcount, priv->rxtime));
else
gfar_write(&priv->regs->rxic, 0);
}
}
return howmany;
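
With TX completion factored out into gfar_clean_tx_ring(), the NAPI poll above also reaps finished TX descriptors, but only when it can take the TX lock without spinning; if the trylock fails, the TX interrupt (or a later poll) does the work instead, so the RX budget is never spent waiting on a lock. A minimal userspace sketch of that opportunistic-cleanup pattern, using a pthread mutex purely as an analogy for spin_trylock_irqsave() on priv->txlock:

/* Opportunistic cleanup under a trylock: the polling path never blocks,
 * it simply skips the optional TX work when the lock is contended.
 * Userspace analogy only, not driver code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t txlock = PTHREAD_MUTEX_INITIALIZER;
static int tx_cleaned;

static void clean_tx_ring(void)
{
	tx_cleaned++;			/* stands in for freeing sent skbs */
}

static void poll_once(void)
{
	if (pthread_mutex_trylock(&txlock) == 0) {
		clean_tx_ring();
		pthread_mutex_unlock(&txlock);
	}
	/* ...the RX work would proceed here whether or not TX was cleaned */
}

int main(void)
{
	poll_once();
	printf("tx cleanups done: %d\n", tx_cleaned);
	return 0;
}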

View file

@ -102,7 +102,7 @@ extern const char gfar_driver_version[];
#define DEFAULT_FIFO_TX_STARVE 0x40
#define DEFAULT_FIFO_TX_STARVE_OFF 0x80
#define DEFAULT_BD_STASH 1
#define DEFAULT_STASH_LENGTH 64
#define DEFAULT_STASH_LENGTH 96
#define DEFAULT_STASH_INDEX 0
/* The number of Exact Match registers */
@ -124,11 +124,18 @@ extern const char gfar_driver_version[];
#define DEFAULT_TX_COALESCE 1
#define DEFAULT_TXCOUNT 16
#define DEFAULT_TXTIME 4
#define DEFAULT_TXTIME 21
#define DEFAULT_RXTIME 21
/* Non NAPI Case */
#ifndef CONFIG_GFAR_NAPI
#define DEFAULT_RX_COALESCE 1
#define DEFAULT_RXCOUNT 16
#define DEFAULT_RXTIME 4
#else
#define DEFAULT_RX_COALESCE 0
#define DEFAULT_RXCOUNT 0
#endif /* CONFIG_GFAR_NAPI */
#define TBIPA_VALUE 0x1f
#define MIIMCFG_INIT_VALUE 0x00000007
@ -242,6 +249,7 @@ extern const char gfar_driver_version[];
#define IEVENT_PERR 0x00000001
#define IEVENT_RX_MASK (IEVENT_RXB0 | IEVENT_RXF0)
#define IEVENT_TX_MASK (IEVENT_TXB | IEVENT_TXF)
#define IEVENT_RTX_MASK (IEVENT_RX_MASK | IEVENT_TX_MASK)
#define IEVENT_ERR_MASK \
(IEVENT_RXC | IEVENT_BSY | IEVENT_EBERR | IEVENT_MSRO | \
IEVENT_BABT | IEVENT_TXC | IEVENT_TXE | IEVENT_LC \
@ -269,11 +277,12 @@ extern const char gfar_driver_version[];
#define IMASK_FIQ 0x00000004
#define IMASK_DPE 0x00000002
#define IMASK_PERR 0x00000001
#define IMASK_RX_DISABLED ~(IMASK_RXFEN0 | IMASK_BSY)
#define IMASK_DEFAULT (IMASK_TXEEN | IMASK_TXFEN | IMASK_TXBEN | \
IMASK_RXFEN0 | IMASK_BSY | IMASK_EBERR | IMASK_BABR | \
IMASK_XFUN | IMASK_RXC | IMASK_BABT | IMASK_DPE \
| IMASK_PERR)
#define IMASK_RTX_DISABLED ((~(IMASK_RXFEN0 | IMASK_TXFEN | IMASK_BSY)) \
& IMASK_DEFAULT)
/* Fifo management */
#define FIFO_TX_THR_MASK 0x01ff
@ -340,6 +349,9 @@ extern const char gfar_driver_version[];
#define RXBD_OVERRUN 0x0002
#define RXBD_TRUNCATED 0x0001
#define RXBD_STATS 0x01ff
#define RXBD_ERR (RXBD_LARGE | RXBD_SHORT | RXBD_NONOCTET \
| RXBD_CRCERR | RXBD_OVERRUN \
| RXBD_TRUNCATED)
/* Rx FCB status field bits */
#define RXFCB_VLN 0x8000

View file

@ -173,7 +173,7 @@ int gfar_mdio_probe(struct device *dev)
new_bus->read = &gfar_mdio_read,
new_bus->write = &gfar_mdio_write,
new_bus->reset = &gfar_mdio_reset,
new_bus->id = pdev->id;
snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id);
pdata = (struct gianfar_mdio_data *)pdev->dev.platform_data;

View file

@ -172,7 +172,7 @@ static int bpq_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_ty
struct ethhdr *eth;
struct bpqdev *bpq;
if (dev->nd_net != &init_net)
if (dev_net(dev) != &init_net)
goto drop;
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
@ -553,7 +553,7 @@ static int bpq_device_event(struct notifier_block *this,unsigned long event, voi
{
struct net_device *dev = (struct net_device *)ptr;
if (dev->nd_net != &init_net)
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
if (!dev_is_ethdev(dev))
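
The bpqether hunks swap direct dev->nd_net dereferences for the dev_net() accessor. Conceptually it is just a getter for the device's network namespace, which lets the field layout (and the namespaces-disabled case) change without touching callers; roughly the following, though the exact header definition is not part of this excerpt:

/* Sketch of the accessor's intent only; the real helper lives in
 * <linux/netdevice.h> and may return &init_net directly when network
 * namespaces are compiled out. */
static inline struct net *dev_net(const struct net_device *dev)
{
	return dev->nd_net;
}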

View file

@ -1259,26 +1259,7 @@ static void ibmveth_proc_unregister_driver(void)
remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
}
static void *ibmveth_seq_start(struct seq_file *seq, loff_t *pos)
{
if (*pos == 0) {
return (void *)1;
} else {
return NULL;
}
}
static void *ibmveth_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return NULL;
}
static void ibmveth_seq_stop(struct seq_file *seq, void *v)
{
}
static int ibmveth_seq_show(struct seq_file *seq, void *v)
static int ibmveth_show(struct seq_file *seq, void *v)
{
struct ibmveth_adapter *adapter = seq->private;
char *current_mac = ((char*) &adapter->netdev->dev_addr);
@ -1302,27 +1283,10 @@ static int ibmveth_seq_show(struct seq_file *seq, void *v)
return 0;
}
static struct seq_operations ibmveth_seq_ops = {
.start = ibmveth_seq_start,
.next = ibmveth_seq_next,
.stop = ibmveth_seq_stop,
.show = ibmveth_seq_show,
};
static int ibmveth_proc_open(struct inode *inode, struct file *file)
{
struct seq_file *seq;
struct proc_dir_entry *proc;
int rc;
rc = seq_open(file, &ibmveth_seq_ops);
if (!rc) {
/* recover the pointer buried in proc_dir_entry data */
seq = file->private_data;
proc = PDE(inode);
seq->private = proc->data;
}
return rc;
return single_open(file, ibmveth_show, PDE(inode)->data);
}
static const struct file_operations ibmveth_proc_fops = {
@ -1330,7 +1294,7 @@ static const struct file_operations ibmveth_proc_fops = {
.open = ibmveth_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
.release = single_release,
};
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)

View file

@ -117,8 +117,8 @@ struct ixgb_buffer {
struct sk_buff *skb;
dma_addr_t dma;
unsigned long time_stamp;
uint16_t length;
uint16_t next_to_watch;
u16 length;
u16 next_to_watch;
};
struct ixgb_desc_ring {
@ -152,13 +152,12 @@ struct ixgb_desc_ring {
struct ixgb_adapter {
struct timer_list watchdog_timer;
struct vlan_group *vlgrp;
uint32_t bd_number;
uint32_t rx_buffer_len;
uint32_t part_num;
uint16_t link_speed;
uint16_t link_duplex;
u32 bd_number;
u32 rx_buffer_len;
u32 part_num;
u16 link_speed;
u16 link_duplex;
spinlock_t tx_lock;
atomic_t irq_sem;
struct work_struct tx_timeout_task;
struct timer_list blink_timer;
@ -168,20 +167,20 @@ struct ixgb_adapter {
struct ixgb_desc_ring tx_ring ____cacheline_aligned_in_smp;
unsigned int restart_queue;
unsigned long timeo_start;
uint32_t tx_cmd_type;
uint64_t hw_csum_tx_good;
uint64_t hw_csum_tx_error;
uint32_t tx_int_delay;
uint32_t tx_timeout_count;
boolean_t tx_int_delay_enable;
boolean_t detect_tx_hung;
u32 tx_cmd_type;
u64 hw_csum_tx_good;
u64 hw_csum_tx_error;
u32 tx_int_delay;
u32 tx_timeout_count;
bool tx_int_delay_enable;
bool detect_tx_hung;
/* RX */
struct ixgb_desc_ring rx_ring;
uint64_t hw_csum_rx_error;
uint64_t hw_csum_rx_good;
uint32_t rx_int_delay;
boolean_t rx_csum;
u64 hw_csum_rx_error;
u64 hw_csum_rx_good;
u32 rx_int_delay;
bool rx_csum;
/* OS defined structs */
struct napi_struct napi;
@ -193,8 +192,17 @@ struct ixgb_adapter {
struct ixgb_hw hw;
u16 msg_enable;
struct ixgb_hw_stats stats;
uint32_t alloc_rx_buff_failed;
boolean_t have_msi;
u32 alloc_rx_buff_failed;
bool have_msi;
unsigned long flags;
};
enum ixgb_state_t {
/* TBD
__IXGB_TESTING,
__IXGB_RESETTING,
*/
__IXGB_DOWN
};
/* Exported from other modules */
@ -203,4 +211,14 @@ extern void ixgb_set_ethtool_ops(struct net_device *netdev);
extern char ixgb_driver_name[];
extern const char ixgb_driver_version[];
extern int ixgb_up(struct ixgb_adapter *adapter);
extern void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
extern void ixgb_reset(struct ixgb_adapter *adapter);
extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
extern void ixgb_update_stats(struct ixgb_adapter *adapter);
#endif /* _IXGB_H_ */

View file

@ -29,14 +29,14 @@
#include "ixgb_hw.h"
#include "ixgb_ee.h"
/* Local prototypes */
static uint16_t ixgb_shift_in_bits(struct ixgb_hw *hw);
static u16 ixgb_shift_in_bits(struct ixgb_hw *hw);
static void ixgb_shift_out_bits(struct ixgb_hw *hw,
uint16_t data,
uint16_t count);
u16 data,
u16 count);
static void ixgb_standby_eeprom(struct ixgb_hw *hw);
static boolean_t ixgb_wait_eeprom_command(struct ixgb_hw *hw);
static bool ixgb_wait_eeprom_command(struct ixgb_hw *hw);
static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
@ -48,7 +48,7 @@ static void ixgb_cleanup_eeprom(struct ixgb_hw *hw);
*****************************************************************************/
static void
ixgb_raise_clock(struct ixgb_hw *hw,
uint32_t *eecd_reg)
u32 *eecd_reg)
{
/* Raise the clock input to the EEPROM (by setting the SK bit), and then
* wait 50 microseconds.
@ -67,7 +67,7 @@ ixgb_raise_clock(struct ixgb_hw *hw,
*****************************************************************************/
static void
ixgb_lower_clock(struct ixgb_hw *hw,
uint32_t *eecd_reg)
u32 *eecd_reg)
{
/* Lower the clock input to the EEPROM (by clearing the SK bit), and then
* wait 50 microseconds.
@ -87,11 +87,11 @@ ixgb_lower_clock(struct ixgb_hw *hw,
*****************************************************************************/
static void
ixgb_shift_out_bits(struct ixgb_hw *hw,
uint16_t data,
uint16_t count)
u16 data,
u16 count)
{
uint32_t eecd_reg;
uint32_t mask;
u32 eecd_reg;
u32 mask;
/* We need to shift "count" bits out to the EEPROM. So, value in the
* "data" parameter will be shifted out to the EEPROM one bit at a time.
@ -133,12 +133,12 @@ ixgb_shift_out_bits(struct ixgb_hw *hw,
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
static uint16_t
static u16
ixgb_shift_in_bits(struct ixgb_hw *hw)
{
uint32_t eecd_reg;
uint32_t i;
uint16_t data;
u32 eecd_reg;
u32 i;
u16 data;
/* In order to read a register from the EEPROM, we need to shift 16 bits
* in from the EEPROM. Bits are "shifted in" by raising the clock input to
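
The ixgb EEPROM helpers in this file bit-bang words over the EECD register: writes shift the data word out MSB first, reads clock 16 bits back in, as the two comments above describe. The serialization itself is a plain mask walk; a standalone sketch of just that part, with the DI/DO/SK register toggling and udelay() timing omitted (the sample bit pattern is a made-up demo value):

/* MSB-first shift-out and shift-in as a plain-C illustration of the loop
 * structure; the real functions wiggle EECD bits between clock edges. */
#include <stdio.h>
#include <stdint.h>

static void shift_out_bits(uint16_t data, uint16_t count)
{
	uint32_t mask = 0x01 << (count - 1);

	while (mask) {
		/* present (data & mask) on the DI line, pulse the clock */
		printf("%d", (data & mask) ? 1 : 0);
		mask >>= 1;
	}
	printf("\n");
}

static uint16_t shift_in_bits(const int *wire)
{
	uint16_t data = 0;
	int i;

	for (i = 0; i < 16; i++) {
		data <<= 1;		/* raise the clock, sample DO */
		data |= wire[i] & 1;
	}
	return data;
}

int main(void)
{
	static const int sample[16] = { 1,0,1,1, 0,0,1,0, 1,1,1,1, 0,0,0,1 };

	shift_out_bits(0xB2F1, 16);			/* 1011001011110001 */
	printf("0x%04X\n", shift_in_bits(sample));	/* 0xB2F1 */
	return 0;
}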
@ -179,7 +179,7 @@ ixgb_shift_in_bits(struct ixgb_hw *hw)
static void
ixgb_setup_eeprom(struct ixgb_hw *hw)
{
uint32_t eecd_reg;
u32 eecd_reg;
eecd_reg = IXGB_READ_REG(hw, EECD);
@ -201,7 +201,7 @@ ixgb_setup_eeprom(struct ixgb_hw *hw)
static void
ixgb_standby_eeprom(struct ixgb_hw *hw)
{
uint32_t eecd_reg;
u32 eecd_reg;
eecd_reg = IXGB_READ_REG(hw, EECD);
@ -235,7 +235,7 @@ ixgb_standby_eeprom(struct ixgb_hw *hw)
static void
ixgb_clock_eeprom(struct ixgb_hw *hw)
{
uint32_t eecd_reg;
u32 eecd_reg;
eecd_reg = IXGB_READ_REG(hw, EECD);
@ -259,7 +259,7 @@ ixgb_clock_eeprom(struct ixgb_hw *hw)
static void
ixgb_cleanup_eeprom(struct ixgb_hw *hw)
{
uint32_t eecd_reg;
u32 eecd_reg;
eecd_reg = IXGB_READ_REG(hw, EECD);
@ -279,14 +279,14 @@ ixgb_cleanup_eeprom(struct ixgb_hw *hw)
* The command is done when the EEPROM's data out pin goes high.
*
* Returns:
* TRUE: EEPROM data pin is high before timeout.
* FALSE: Time expired.
* true: EEPROM data pin is high before timeout.
* false: Time expired.
*****************************************************************************/
static boolean_t
static bool
ixgb_wait_eeprom_command(struct ixgb_hw *hw)
{
uint32_t eecd_reg;
uint32_t i;
u32 eecd_reg;
u32 i;
/* Toggle the CS line. This in effect tells to EEPROM to actually execute
* the command in question.
@ -301,12 +301,12 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
eecd_reg = IXGB_READ_REG(hw, EECD);
if(eecd_reg & IXGB_EECD_DO)
return (TRUE);
return (true);
udelay(50);
}
ASSERT(0);
return (FALSE);
return (false);
}
/******************************************************************************
@ -319,22 +319,22 @@ ixgb_wait_eeprom_command(struct ixgb_hw *hw)
* valid.
*
* Returns:
* TRUE: Checksum is valid
* FALSE: Checksum is not valid.
* true: Checksum is valid
* false: Checksum is not valid.
*****************************************************************************/
boolean_t
bool
ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
{
uint16_t checksum = 0;
uint16_t i;
u16 checksum = 0;
u16 i;
for(i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++)
checksum += ixgb_read_eeprom(hw, i);
if(checksum == (uint16_t) EEPROM_SUM)
return (TRUE);
if(checksum == (u16) EEPROM_SUM)
return (true);
else
return (FALSE);
return (false);
}
/******************************************************************************
@ -348,13 +348,13 @@ ixgb_validate_eeprom_checksum(struct ixgb_hw *hw)
void
ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
{
uint16_t checksum = 0;
uint16_t i;
u16 checksum = 0;
u16 i;
for(i = 0; i < EEPROM_CHECKSUM_REG; i++)
checksum += ixgb_read_eeprom(hw, i);
checksum = (uint16_t) EEPROM_SUM - checksum;
checksum = (u16) EEPROM_SUM - checksum;
ixgb_write_eeprom(hw, EEPROM_CHECKSUM_REG, checksum);
return;
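
The two checksum helpers above encode one rule: the 16-bit sum of EEPROM words 0 through EEPROM_CHECKSUM_REG must equal EEPROM_SUM, so the update path stores EEPROM_SUM minus the sum of the preceding words. A self-contained sketch; the word contents, the checksum index and the 0xBABA constant are demo assumptions, the real values live in ixgb_ee.h:

/* Demo of the checksum invariant: after update_checksum(), the 16-bit
 * sum over words 0..CHECKSUM_REG equals EEPROM_SUM.  All numbers here
 * are example values, not the driver's constants. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_EEPROM_SUM		0xBABA
#define DEMO_CHECKSUM_REG	7

static uint16_t eeprom[DEMO_CHECKSUM_REG + 1] = {
	0x1234, 0xABCD, 0x0007, 0x8001, 0x00FF, 0x4242, 0x0000, 0x0000
};

static void update_checksum(void)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i < DEMO_CHECKSUM_REG; i++)
		sum += eeprom[i];
	eeprom[DEMO_CHECKSUM_REG] = (uint16_t)(DEMO_EEPROM_SUM - sum);
}

static int checksum_valid(void)
{
	uint16_t sum = 0;
	int i;

	for (i = 0; i <= DEMO_CHECKSUM_REG; i++)
		sum += eeprom[i];
	return sum == DEMO_EEPROM_SUM;
}

int main(void)
{
	update_checksum();
	printf("checksum word 0x%04X, valid=%d\n",
	       eeprom[DEMO_CHECKSUM_REG], checksum_valid());
	return 0;
}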
@ -372,7 +372,7 @@ ixgb_update_eeprom_checksum(struct ixgb_hw *hw)
*
*****************************************************************************/
void
ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
ixgb_write_eeprom(struct ixgb_hw *hw, u16 offset, u16 data)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
@ -425,11 +425,11 @@ ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
* Returns:
* The 16-bit value read from the eeprom
*****************************************************************************/
uint16_t
u16
ixgb_read_eeprom(struct ixgb_hw *hw,
uint16_t offset)
u16 offset)
{
uint16_t data;
u16 data;
/* Prepare the EEPROM for reading */
ixgb_setup_eeprom(hw);
@ -457,14 +457,14 @@ ixgb_read_eeprom(struct ixgb_hw *hw,
* hw - Struct containing variables accessed by shared code
*
* Returns:
* TRUE: if eeprom read is successful
* FALSE: otherwise.
* true: if eeprom read is successful
* false: otherwise.
*****************************************************************************/
boolean_t
bool
ixgb_get_eeprom_data(struct ixgb_hw *hw)
{
uint16_t i;
uint16_t checksum = 0;
u16 i;
u16 checksum = 0;
struct ixgb_ee_map_type *ee_map;
DEBUGFUNC("ixgb_get_eeprom_data");
@ -473,27 +473,27 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
DEBUGOUT("ixgb_ee: Reading eeprom data\n");
for(i = 0; i < IXGB_EEPROM_SIZE ; i++) {
uint16_t ee_data;
u16 ee_data;
ee_data = ixgb_read_eeprom(hw, i);
checksum += ee_data;
hw->eeprom[i] = cpu_to_le16(ee_data);
}
if (checksum != (uint16_t) EEPROM_SUM) {
if (checksum != (u16) EEPROM_SUM) {
DEBUGOUT("ixgb_ee: Checksum invalid.\n");
/* clear the init_ctrl_reg_1 to signify that the cache is
* invalidated */
ee_map->init_ctrl_reg_1 = cpu_to_le16(EEPROM_ICW1_SIGNATURE_CLEAR);
return (FALSE);
return (false);
}
if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
!= cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
DEBUGOUT("ixgb_ee: Signature invalid.\n");
return(FALSE);
return(false);
}
return(TRUE);
return(true);
}
/******************************************************************************
@ -503,17 +503,17 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
* hw - Struct containing variables accessed by shared code
*
* Returns:
* TRUE: eeprom signature was good and the eeprom read was successful
* FALSE: otherwise.
* true: eeprom signature was good and the eeprom read was successful
* false: otherwise.
******************************************************************************/
static boolean_t
static bool
ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if ((ee_map->init_ctrl_reg_1 & cpu_to_le16(EEPROM_ICW1_SIGNATURE_MASK))
== cpu_to_le16(EEPROM_ICW1_SIGNATURE_VALID)) {
return (TRUE);
return (true);
} else {
return ixgb_get_eeprom_data(hw);
}
@ -529,11 +529,11 @@ ixgb_check_and_get_eeprom_data (struct ixgb_hw* hw)
* Word at indexed offset in eeprom, if valid, 0 otherwise.
******************************************************************************/
__le16
ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index)
ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index)
{
if ((index < IXGB_EEPROM_SIZE) &&
(ixgb_check_and_get_eeprom_data(hw) == TRUE)) {
(ixgb_check_and_get_eeprom_data(hw) == true)) {
return(hw->eeprom[index]);
}
@ -550,14 +550,14 @@ ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index)
******************************************************************************/
void
ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
uint8_t *mac_addr)
u8 *mac_addr)
{
int i;
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
DEBUGFUNC("ixgb_get_ee_mac_addr");
if (ixgb_check_and_get_eeprom_data(hw) == TRUE) {
if (ixgb_check_and_get_eeprom_data(hw) == true) {
for (i = 0; i < IXGB_ETH_LENGTH_OF_ADDRESS; i++) {
mac_addr[i] = ee_map->mac_addr[i];
DEBUGOUT2("mac(%d) = %.2X\n", i, mac_addr[i]);
@ -574,10 +574,10 @@ ixgb_get_ee_mac_addr(struct ixgb_hw *hw,
* Returns:
* PBA number if EEPROM contents are valid, 0 otherwise
******************************************************************************/
uint32_t
u32
ixgb_get_ee_pba_number(struct ixgb_hw *hw)
{
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
if (ixgb_check_and_get_eeprom_data(hw) == true)
return (le16_to_cpu(hw->eeprom[EEPROM_PBA_1_2_REG])
| (le16_to_cpu(hw->eeprom[EEPROM_PBA_3_4_REG])<<16));
@ -593,12 +593,12 @@ ixgb_get_ee_pba_number(struct ixgb_hw *hw)
* Returns:
* Device Id if EEPROM contents are valid, 0 otherwise
******************************************************************************/
uint16_t
u16
ixgb_get_ee_device_id(struct ixgb_hw *hw)
{
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
if (ixgb_check_and_get_eeprom_data(hw) == true)
return (le16_to_cpu(ee_map->device_id));
return (0);

View file

@ -75,7 +75,7 @@
/* EEPROM structure */
struct ixgb_ee_map_type {
uint8_t mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];
u8 mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS];
__le16 compatibility;
__le16 reserved1[4];
__le32 pba_number;
@ -88,19 +88,19 @@ struct ixgb_ee_map_type {
__le16 oem_reserved[16];
__le16 swdpins_reg;
__le16 circuit_ctrl_reg;
uint8_t d3_power;
uint8_t d0_power;
u8 d3_power;
u8 d0_power;
__le16 reserved2[28];
__le16 checksum;
};
/* EEPROM Functions */
uint16_t ixgb_read_eeprom(struct ixgb_hw *hw, uint16_t reg);
u16 ixgb_read_eeprom(struct ixgb_hw *hw, u16 reg);
boolean_t ixgb_validate_eeprom_checksum(struct ixgb_hw *hw);
bool ixgb_validate_eeprom_checksum(struct ixgb_hw *hw);
void ixgb_update_eeprom_checksum(struct ixgb_hw *hw);
void ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t reg, uint16_t data);
void ixgb_write_eeprom(struct ixgb_hw *hw, u16 reg, u16 data);
#endif /* IXGB_EE_H */

View file

@ -32,15 +32,6 @@
#include <asm/uaccess.h>
extern int ixgb_up(struct ixgb_adapter *adapter);
extern void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
extern void ixgb_reset(struct ixgb_adapter *adapter);
extern int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
extern int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
extern void ixgb_free_rx_resources(struct ixgb_adapter *adapter);
extern void ixgb_free_tx_resources(struct ixgb_adapter *adapter);
extern void ixgb_update_stats(struct ixgb_adapter *adapter);
#define IXGB_ALL_RAR_ENTRIES 16
struct ixgb_stats {
@ -136,7 +127,7 @@ ixgb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
return -EINVAL;
if(netif_running(adapter->netdev)) {
ixgb_down(adapter, TRUE);
ixgb_down(adapter, true);
ixgb_reset(adapter);
ixgb_up(adapter);
ixgb_set_speed_duplex(netdev);
@ -185,7 +176,7 @@ ixgb_set_pauseparam(struct net_device *netdev,
hw->fc.type = ixgb_fc_none;
if(netif_running(adapter->netdev)) {
ixgb_down(adapter, TRUE);
ixgb_down(adapter, true);
ixgb_up(adapter);
ixgb_set_speed_duplex(netdev);
} else
@ -194,7 +185,7 @@ ixgb_set_pauseparam(struct net_device *netdev,
return 0;
}
static uint32_t
static u32
ixgb_get_rx_csum(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@ -203,14 +194,14 @@ ixgb_get_rx_csum(struct net_device *netdev)
}
static int
ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
ixgb_set_rx_csum(struct net_device *netdev, u32 data)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
adapter->rx_csum = data;
if(netif_running(netdev)) {
ixgb_down(adapter,TRUE);
ixgb_down(adapter, true);
ixgb_up(adapter);
ixgb_set_speed_duplex(netdev);
} else
@ -218,14 +209,14 @@ ixgb_set_rx_csum(struct net_device *netdev, uint32_t data)
return 0;
}
static uint32_t
static u32
ixgb_get_tx_csum(struct net_device *netdev)
{
return (netdev->features & NETIF_F_HW_CSUM) != 0;
}
static int
ixgb_set_tx_csum(struct net_device *netdev, uint32_t data)
ixgb_set_tx_csum(struct net_device *netdev, u32 data)
{
if (data)
netdev->features |= NETIF_F_HW_CSUM;
@ -236,7 +227,7 @@ ixgb_set_tx_csum(struct net_device *netdev, uint32_t data)
}
static int
ixgb_set_tso(struct net_device *netdev, uint32_t data)
ixgb_set_tso(struct net_device *netdev, u32 data)
{
if(data)
netdev->features |= NETIF_F_TSO;
@ -245,7 +236,7 @@ ixgb_set_tso(struct net_device *netdev, uint32_t data)
return 0;
}
static uint32_t
static u32
ixgb_get_msglevel(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@ -253,7 +244,7 @@ ixgb_get_msglevel(struct net_device *netdev)
}
static void
ixgb_set_msglevel(struct net_device *netdev, uint32_t data)
ixgb_set_msglevel(struct net_device *netdev, u32 data)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
adapter->msg_enable = data;
@ -263,7 +254,7 @@ ixgb_set_msglevel(struct net_device *netdev, uint32_t data)
static int
ixgb_get_regs_len(struct net_device *netdev)
{
#define IXGB_REG_DUMP_LEN 136*sizeof(uint32_t)
#define IXGB_REG_DUMP_LEN 136*sizeof(u32)
return IXGB_REG_DUMP_LEN;
}
@ -273,9 +264,9 @@ ixgb_get_regs(struct net_device *netdev,
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
uint32_t *reg = p;
uint32_t *reg_start = reg;
uint8_t i;
u32 *reg = p;
u32 *reg_start = reg;
u8 i;
/* the 1 (one) below indicates an attempt at versioning, if the
* interface in ethtool or the driver changes, this 1 should be
@ -404,7 +395,7 @@ ixgb_get_regs(struct net_device *netdev,
*reg++ = IXGB_GET_STAT(adapter, xofftxc); /* 134 */
*reg++ = IXGB_GET_STAT(adapter, rjc); /* 135 */
regs->len = (reg - reg_start) * sizeof(uint32_t);
regs->len = (reg - reg_start) * sizeof(u32);
}
static int
@ -416,7 +407,7 @@ ixgb_get_eeprom_len(struct net_device *netdev)
static int
ixgb_get_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, uint8_t *bytes)
struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
@ -454,7 +445,7 @@ ixgb_get_eeprom(struct net_device *netdev,
eeprom_buff[i] = ixgb_get_eeprom_word(hw, (first_word + i));
}
memcpy(bytes, (uint8_t *)eeprom_buff + (eeprom->offset & 1),
memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
eeprom->len);
kfree(eeprom_buff);
@ -464,14 +455,14 @@ geeprom_error:
static int
ixgb_set_eeprom(struct net_device *netdev,
struct ethtool_eeprom *eeprom, uint8_t *bytes)
struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
uint16_t *eeprom_buff;
u16 *eeprom_buff;
void *ptr;
int max_len, first_word, last_word;
uint16_t i;
u16 i;
if(eeprom->len == 0)
return -EINVAL;
@ -570,14 +561,14 @@ ixgb_set_ringparam(struct net_device *netdev,
return -EINVAL;
if(netif_running(adapter->netdev))
ixgb_down(adapter,TRUE);
ixgb_down(adapter, true);
rxdr->count = max(ring->rx_pending,(uint32_t)MIN_RXD);
rxdr->count = min(rxdr->count,(uint32_t)MAX_RXD);
rxdr->count = max(ring->rx_pending,(u32)MIN_RXD);
rxdr->count = min(rxdr->count,(u32)MAX_RXD);
rxdr->count = ALIGN(rxdr->count, IXGB_REQ_RX_DESCRIPTOR_MULTIPLE);
txdr->count = max(ring->tx_pending,(uint32_t)MIN_TXD);
txdr->count = min(txdr->count,(uint32_t)MAX_TXD);
txdr->count = max(ring->tx_pending,(u32)MIN_TXD);
txdr->count = min(txdr->count,(u32)MAX_TXD);
txdr->count = ALIGN(txdr->count, IXGB_REQ_TX_DESCRIPTOR_MULTIPLE);
if(netif_running(adapter->netdev)) {
@ -633,7 +624,7 @@ ixgb_led_blink_callback(unsigned long data)
}
static int
ixgb_phys_id(struct net_device *netdev, uint32_t data)
ixgb_phys_id(struct net_device *netdev, u32 data)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
@ -669,7 +660,7 @@ ixgb_get_sset_count(struct net_device *netdev, int sset)
static void
ixgb_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, uint64_t *data)
struct ethtool_stats *stats, u64 *data)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
int i;
@ -678,12 +669,12 @@ ixgb_get_ethtool_stats(struct net_device *netdev,
for(i = 0; i < IXGB_STATS_LEN; i++) {
char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset;
data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
}
static void
ixgb_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
ixgb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
int i;

View file

@ -35,13 +35,13 @@
/* Local function prototypes */
static uint32_t ixgb_hash_mc_addr(struct ixgb_hw *hw, uint8_t * mc_addr);
static u32 ixgb_hash_mc_addr(struct ixgb_hw *hw, u8 * mc_addr);
static void ixgb_mta_set(struct ixgb_hw *hw, uint32_t hash_value);
static void ixgb_mta_set(struct ixgb_hw *hw, u32 hash_value);
static void ixgb_get_bus_info(struct ixgb_hw *hw);
static boolean_t ixgb_link_reset(struct ixgb_hw *hw);
static bool ixgb_link_reset(struct ixgb_hw *hw);
static void ixgb_optics_reset(struct ixgb_hw *hw);
@ -55,18 +55,18 @@ static void ixgb_clear_vfta(struct ixgb_hw *hw);
static void ixgb_init_rx_addrs(struct ixgb_hw *hw);
static uint16_t ixgb_read_phy_reg(struct ixgb_hw *hw,
uint32_t reg_address,
uint32_t phy_address,
uint32_t device_type);
static u16 ixgb_read_phy_reg(struct ixgb_hw *hw,
u32 reg_address,
u32 phy_address,
u32 device_type);
static boolean_t ixgb_setup_fc(struct ixgb_hw *hw);
static bool ixgb_setup_fc(struct ixgb_hw *hw);
static boolean_t mac_addr_valid(uint8_t *mac_addr);
static bool mac_addr_valid(u8 *mac_addr);
static uint32_t ixgb_mac_reset(struct ixgb_hw *hw)
static u32 ixgb_mac_reset(struct ixgb_hw *hw)
{
uint32_t ctrl_reg;
u32 ctrl_reg;
ctrl_reg = IXGB_CTRL0_RST |
IXGB_CTRL0_SDP3_DIR | /* All pins are Output=1 */
@ -114,11 +114,11 @@ static uint32_t ixgb_mac_reset(struct ixgb_hw *hw)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
boolean_t
bool
ixgb_adapter_stop(struct ixgb_hw *hw)
{
uint32_t ctrl_reg;
uint32_t icr_reg;
u32 ctrl_reg;
u32 icr_reg;
DEBUGFUNC("ixgb_adapter_stop");
@ -127,13 +127,13 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
*/
if(hw->adapter_stopped) {
DEBUGOUT("Exiting because the adapter is already stopped!!!\n");
return FALSE;
return false;
}
/* Set the Adapter Stopped flag so other driver functions stop
* touching the Hardware.
*/
hw->adapter_stopped = TRUE;
hw->adapter_stopped = true;
/* Clear interrupt mask to stop board from generating interrupts */
DEBUGOUT("Masking off all interrupts\n");
@ -179,8 +179,8 @@ ixgb_adapter_stop(struct ixgb_hw *hw)
static ixgb_xpak_vendor
ixgb_identify_xpak_vendor(struct ixgb_hw *hw)
{
uint32_t i;
uint16_t vendor_name[5];
u32 i;
u16 vendor_name[5];
ixgb_xpak_vendor xpak_vendor;
DEBUGFUNC("ixgb_identify_xpak_vendor");
@ -286,15 +286,15 @@ ixgb_identify_phy(struct ixgb_hw *hw)
* Leaves the transmit and receive units disabled and uninitialized.
*
* Returns:
* TRUE if successful,
* FALSE if unrecoverable problems were encountered.
* true if successful,
* false if unrecoverable problems were encountered.
*****************************************************************************/
boolean_t
bool
ixgb_init_hw(struct ixgb_hw *hw)
{
uint32_t i;
uint32_t ctrl_reg;
boolean_t status;
u32 i;
u32 ctrl_reg;
bool status;
DEBUGFUNC("ixgb_init_hw");
@ -318,9 +318,8 @@ ixgb_init_hw(struct ixgb_hw *hw)
/* Delay a few ms just to allow the reset to complete */
msleep(IXGB_DELAY_AFTER_EE_RESET);
if (ixgb_get_eeprom_data(hw) == FALSE) {
return(FALSE);
}
if (!ixgb_get_eeprom_data(hw))
return false;
/* Use the device id to determine the type of phy/transceiver. */
hw->device_id = ixgb_get_ee_device_id(hw);
@ -337,11 +336,11 @@ ixgb_init_hw(struct ixgb_hw *hw)
*/
if (!mac_addr_valid(hw->curr_mac_addr)) {
DEBUGOUT("MAC address invalid after ixgb_init_rx_addrs\n");
return(FALSE);
return(false);
}
/* tell the routines in this file they can access hardware again */
hw->adapter_stopped = FALSE;
hw->adapter_stopped = false;
/* Fill in the bus_info structure */
ixgb_get_bus_info(hw);
@ -378,7 +377,7 @@ ixgb_init_hw(struct ixgb_hw *hw)
static void
ixgb_init_rx_addrs(struct ixgb_hw *hw)
{
uint32_t i;
u32 i;
DEBUGFUNC("ixgb_init_rx_addrs");
@ -438,13 +437,13 @@ ixgb_init_rx_addrs(struct ixgb_hw *hw)
*****************************************************************************/
void
ixgb_mc_addr_list_update(struct ixgb_hw *hw,
uint8_t *mc_addr_list,
uint32_t mc_addr_count,
uint32_t pad)
u8 *mc_addr_list,
u32 mc_addr_count,
u32 pad)
{
uint32_t hash_value;
uint32_t i;
uint32_t rar_used_count = 1; /* RAR[0] is used for our MAC address */
u32 hash_value;
u32 i;
u32 rar_used_count = 1; /* RAR[0] is used for our MAC address */
DEBUGFUNC("ixgb_mc_addr_list_update");
@ -516,11 +515,11 @@ ixgb_mc_addr_list_update(struct ixgb_hw *hw,
* Returns:
* The hash value
*****************************************************************************/
static uint32_t
static u32
ixgb_hash_mc_addr(struct ixgb_hw *hw,
uint8_t *mc_addr)
u8 *mc_addr)
{
uint32_t hash_value = 0;
u32 hash_value = 0;
DEBUGFUNC("ixgb_hash_mc_addr");
@ -534,18 +533,18 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
case 0:
/* [47:36] i.e. 0x563 for above example address */
hash_value =
((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
((mc_addr[4] >> 4) | (((u16) mc_addr[5]) << 4));
break;
case 1: /* [46:35] i.e. 0xAC6 for above example address */
hash_value =
((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5));
((mc_addr[4] >> 3) | (((u16) mc_addr[5]) << 5));
break;
case 2: /* [45:34] i.e. 0x5D8 for above example address */
hash_value =
((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
((mc_addr[4] >> 2) | (((u16) mc_addr[5]) << 6));
break;
case 3: /* [43:32] i.e. 0x634 for above example address */
hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
hash_value = ((mc_addr[4]) | (((u16) mc_addr[5]) << 8));
break;
default:
/* Invalid mc_filter_type, what should we do? */
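
The switch above reduces a multicast MAC to a 12-bit hash by picking one of four bit windows out of the last two address bytes, selected by mc_filter_type; that hash then selects a single bit in the 4096-bit multicast table handled by ixgb_mta_set() just below. A standalone sketch with an arbitrary example address; the final 12-bit mask and the register/bit split are inferred from the 128 x 32-bit MTA layout rather than shown in this excerpt:

/* The four filter-type variants applied to an example multicast address,
 * plus the MTA register/bit split implied by the 128 x 32-bit table.
 * The address, the 0xFFF mask and the >>5 / &0x1F split are demo
 * inferences, not lines from the patch. */
#include <stdio.h>
#include <stdint.h>

static uint32_t hash_mc_addr(const uint8_t *mc, int filter_type)
{
	switch (filter_type) {
	case 0:		/* bits [47:36] */
		return (mc[4] >> 4) | ((uint16_t)mc[5] << 4);
	case 1:		/* bits [46:35] */
		return (mc[4] >> 3) | ((uint16_t)mc[5] << 5);
	case 2:		/* bits [45:34] */
		return (mc[4] >> 2) | ((uint16_t)mc[5] << 6);
	default:	/* case 3: bits [43:32] */
		return mc[4] | ((uint16_t)mc[5] << 8);
	}
}

int main(void)
{
	const uint8_t mc[6] = { 0x01, 0x00, 0x5e, 0x12, 0x34, 0x56 };
	int type;

	for (type = 0; type < 4; type++) {
		uint32_t hash = hash_mc_addr(mc, type) & 0xFFF;

		printf("type %d: hash 0x%03X -> MTA reg %u bit %u\n",
		       type, hash, hash >> 5, hash & 0x1F);
	}
	return 0;
}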
@ -566,10 +565,10 @@ ixgb_hash_mc_addr(struct ixgb_hw *hw,
*****************************************************************************/
static void
ixgb_mta_set(struct ixgb_hw *hw,
uint32_t hash_value)
u32 hash_value)
{
uint32_t hash_bit, hash_reg;
uint32_t mta_reg;
u32 hash_bit, hash_reg;
u32 mta_reg;
/* The MTA is a register array of 128 32-bit registers.
* It is treated like an array of 4096 bits. We want to set
@ -600,23 +599,23 @@ ixgb_mta_set(struct ixgb_hw *hw,
*****************************************************************************/
void
ixgb_rar_set(struct ixgb_hw *hw,
uint8_t *addr,
uint32_t index)
u8 *addr,
u32 index)
{
uint32_t rar_low, rar_high;
u32 rar_low, rar_high;
DEBUGFUNC("ixgb_rar_set");
/* HW expects these in little endian so we reverse the byte order
* from network order (big endian) to little endian
*/
rar_low = ((uint32_t) addr[0] |
((uint32_t)addr[1] << 8) |
((uint32_t)addr[2] << 16) |
((uint32_t)addr[3] << 24));
rar_low = ((u32) addr[0] |
((u32)addr[1] << 8) |
((u32)addr[2] << 16) |
((u32)addr[3] << 24));
rar_high = ((uint32_t) addr[4] |
((uint32_t)addr[5] << 8) |
rar_high = ((u32) addr[4] |
((u32)addr[5] << 8) |
IXGB_RAH_AV);
IXGB_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
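
ixgb_rar_set() above packs the six address bytes into the RAL/RAH register pair with byte 0 in the least significant position, i.e. the register image is the address laid out little-endian, and the address-valid flag is OR'd into the high register. A standalone sketch; the example MAC is arbitrary and DEMO_RAH_AV is a placeholder, since IXGB_RAH_AV's value is defined in ixgb_hw.h rather than in this excerpt:

/* Packing a MAC address into the RAL/RAH pair, byte 0 lowest.
 * DEMO_RAH_AV is a placeholder for the address-valid bit. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_RAH_AV 0x80000000u		/* assumed demo value */

int main(void)
{
	const uint8_t addr[6] = { 0x00, 0x1B, 0x21, 0xAA, 0xBB, 0xCC };
	uint32_t rar_low, rar_high;

	rar_low = (uint32_t)addr[0] |
		  ((uint32_t)addr[1] << 8) |
		  ((uint32_t)addr[2] << 16) |
		  ((uint32_t)addr[3] << 24);

	rar_high = (uint32_t)addr[4] |
		   ((uint32_t)addr[5] << 8) |
		   DEMO_RAH_AV;

	printf("RAL = 0x%08X\n", rar_low);	/* 0xAA211B00 */
	printf("RAH = 0x%08X\n", rar_high);	/* 0x8000CCBB */
	return 0;
}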
@ -633,8 +632,8 @@ ixgb_rar_set(struct ixgb_hw *hw,
*****************************************************************************/
void
ixgb_write_vfta(struct ixgb_hw *hw,
uint32_t offset,
uint32_t value)
u32 offset,
u32 value)
{
IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, value);
return;
@ -648,7 +647,7 @@ ixgb_write_vfta(struct ixgb_hw *hw,
static void
ixgb_clear_vfta(struct ixgb_hw *hw)
{
uint32_t offset;
u32 offset;
for(offset = 0; offset < IXGB_VLAN_FILTER_TBL_SIZE; offset++)
IXGB_WRITE_REG_ARRAY(hw, VFTA, offset, 0);
@ -661,12 +660,12 @@ ixgb_clear_vfta(struct ixgb_hw *hw)
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
static boolean_t
static bool
ixgb_setup_fc(struct ixgb_hw *hw)
{
uint32_t ctrl_reg;
uint32_t pap_reg = 0; /* by default, assume no pause time */
boolean_t status = TRUE;
u32 ctrl_reg;
u32 pap_reg = 0; /* by default, assume no pause time */
bool status = true;
DEBUGFUNC("ixgb_setup_fc");
@ -763,15 +762,15 @@ ixgb_setup_fc(struct ixgb_hw *hw)
* This requires that first an address cycle command is sent, followed by a
* read command.
*****************************************************************************/
static uint16_t
static u16
ixgb_read_phy_reg(struct ixgb_hw *hw,
uint32_t reg_address,
uint32_t phy_address,
uint32_t device_type)
u32 reg_address,
u32 phy_address,
u32 device_type)
{
uint32_t i;
uint32_t data;
uint32_t command = 0;
u32 i;
u32 data;
u32 command = 0;
ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
@ -836,7 +835,7 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
*/
data = IXGB_READ_REG(hw, MSRWD);
data >>= IXGB_MSRWD_READ_DATA_SHIFT;
return((uint16_t) data);
return((u16) data);
}
/******************************************************************************
@ -858,20 +857,20 @@ ixgb_read_phy_reg(struct ixgb_hw *hw,
*****************************************************************************/
static void
ixgb_write_phy_reg(struct ixgb_hw *hw,
uint32_t reg_address,
uint32_t phy_address,
uint32_t device_type,
uint16_t data)
u32 reg_address,
u32 phy_address,
u32 device_type,
u16 data)
{
uint32_t i;
uint32_t command = 0;
u32 i;
u32 command = 0;
ASSERT(reg_address <= IXGB_MAX_PHY_REG_ADDRESS);
ASSERT(phy_address <= IXGB_MAX_PHY_ADDRESS);
ASSERT(device_type <= IXGB_MAX_PHY_DEV_TYPE);
/* Put the data in the MDIO Read/Write Data register */
IXGB_WRITE_REG(hw, MSRWD, (uint32_t)data);
IXGB_WRITE_REG(hw, MSRWD, (u32)data);
/* Setup and write the address cycle command */
command = ((reg_address << IXGB_MSCA_NP_ADDR_SHIFT) |
@ -940,8 +939,8 @@ ixgb_write_phy_reg(struct ixgb_hw *hw,
void
ixgb_check_for_link(struct ixgb_hw *hw)
{
uint32_t status_reg;
uint32_t xpcss_reg;
u32 status_reg;
u32 xpcss_reg;
DEBUGFUNC("ixgb_check_for_link");
@ -950,7 +949,7 @@ ixgb_check_for_link(struct ixgb_hw *hw)
if ((xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
(status_reg & IXGB_STATUS_LU)) {
hw->link_up = TRUE;
hw->link_up = true;
} else if (!(xpcss_reg & IXGB_XPCSS_ALIGN_STATUS) &&
(status_reg & IXGB_STATUS_LU)) {
DEBUGOUT("XPCSS Not Aligned while Status:LU is set.\n");
@ -974,10 +973,10 @@ ixgb_check_for_link(struct ixgb_hw *hw)
*
* Called by any function that needs to check the link status of the adapter.
*****************************************************************************/
boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw)
bool ixgb_check_for_bad_link(struct ixgb_hw *hw)
{
uint32_t newLFC, newRFC;
boolean_t bad_link_returncode = FALSE;
u32 newLFC, newRFC;
bool bad_link_returncode = false;
if (hw->phy_type == ixgb_phy_type_txn17401) {
newLFC = IXGB_READ_REG(hw, LFC);
@ -986,7 +985,7 @@ boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw)
|| (hw->lastRFC + 250 < newRFC)) {
DEBUGOUT
("BAD LINK! too many LFC/RFC since last check\n");
bad_link_returncode = TRUE;
bad_link_returncode = true;
}
hw->lastLFC = newLFC;
hw->lastRFC = newRFC;
@ -1003,7 +1002,7 @@ boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw)
static void
ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
{
volatile uint32_t temp_reg;
volatile u32 temp_reg;
DEBUGFUNC("ixgb_clear_hw_cntrs");
@ -1084,7 +1083,7 @@ ixgb_clear_hw_cntrs(struct ixgb_hw *hw)
void
ixgb_led_on(struct ixgb_hw *hw)
{
uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
/* To turn on the LED, clear software-definable pin 0 (SDP0). */
ctrl0_reg &= ~IXGB_CTRL0_SDP0;
@ -1100,7 +1099,7 @@ ixgb_led_on(struct ixgb_hw *hw)
void
ixgb_led_off(struct ixgb_hw *hw)
{
uint32_t ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
u32 ctrl0_reg = IXGB_READ_REG(hw, CTRL0);
/* To turn off the LED, set software-definable pin 0 (SDP0). */
ctrl0_reg |= IXGB_CTRL0_SDP0;
@ -1116,7 +1115,7 @@ ixgb_led_off(struct ixgb_hw *hw)
static void
ixgb_get_bus_info(struct ixgb_hw *hw)
{
uint32_t status_reg;
u32 status_reg;
status_reg = IXGB_READ_REG(hw, STATUS);
@ -1155,21 +1154,21 @@ ixgb_get_bus_info(struct ixgb_hw *hw)
* mac_addr - pointer to MAC address.
*
*****************************************************************************/
static boolean_t
mac_addr_valid(uint8_t *mac_addr)
static bool
mac_addr_valid(u8 *mac_addr)
{
boolean_t is_valid = TRUE;
bool is_valid = true;
DEBUGFUNC("mac_addr_valid");
/* Make sure it is not a multicast address */
if (IS_MULTICAST(mac_addr)) {
DEBUGOUT("MAC address is multicast\n");
is_valid = FALSE;
is_valid = false;
}
/* Not a broadcast address */
else if (IS_BROADCAST(mac_addr)) {
DEBUGOUT("MAC address is broadcast\n");
is_valid = FALSE;
is_valid = false;
}
/* Reject the zero address */
else if (mac_addr[0] == 0 &&
@ -1179,7 +1178,7 @@ mac_addr_valid(uint8_t *mac_addr)
mac_addr[4] == 0 &&
mac_addr[5] == 0) {
DEBUGOUT("MAC address is all zeros\n");
is_valid = FALSE;
is_valid = false;
}
return (is_valid);
}
@ -1190,12 +1189,12 @@ mac_addr_valid(uint8_t *mac_addr)
*
* hw - Struct containing variables accessed by shared code
*****************************************************************************/
static boolean_t
static bool
ixgb_link_reset(struct ixgb_hw *hw)
{
boolean_t link_status = FALSE;
uint8_t wait_retries = MAX_RESET_ITERATIONS;
uint8_t lrst_retries = MAX_RESET_ITERATIONS;
bool link_status = false;
u8 wait_retries = MAX_RESET_ITERATIONS;
u8 lrst_retries = MAX_RESET_ITERATIONS;
do {
/* Reset the link */
@ -1208,7 +1207,7 @@ ixgb_link_reset(struct ixgb_hw *hw)
link_status =
((IXGB_READ_REG(hw, STATUS) & IXGB_STATUS_LU)
&& (IXGB_READ_REG(hw, XPCSS) &
IXGB_XPCSS_ALIGN_STATUS)) ? TRUE : FALSE;
IXGB_XPCSS_ALIGN_STATUS)) ? true : false;
} while (!link_status && --wait_retries);
} while (!link_status && --lrst_retries);
@ -1225,7 +1224,7 @@ static void
ixgb_optics_reset(struct ixgb_hw *hw)
{
if (hw->phy_type == ixgb_phy_type_txn17401) {
uint16_t mdio_reg;
u16 mdio_reg;
ixgb_write_phy_reg(hw,
MDIO_PMA_PMD_CR1,

View file

@ -538,8 +538,8 @@ struct ixgb_rx_desc {
__le64 buff_addr;
__le16 length;
__le16 reserved;
uint8_t status;
uint8_t errors;
u8 status;
u8 errors;
__le16 special;
};
@ -570,8 +570,8 @@ struct ixgb_rx_desc {
struct ixgb_tx_desc {
__le64 buff_addr;
__le32 cmd_type_len;
uint8_t status;
uint8_t popts;
u8 status;
u8 popts;
__le16 vlan;
};
@ -595,15 +595,15 @@ struct ixgb_tx_desc {
#define IXGB_TX_DESC_SPECIAL_PRI_SHIFT IXGB_RX_DESC_SPECIAL_PRI_SHIFT /* Priority is in upper 3 of 16 */
struct ixgb_context_desc {
uint8_t ipcss;
uint8_t ipcso;
u8 ipcss;
u8 ipcso;
__le16 ipcse;
uint8_t tucss;
uint8_t tucso;
u8 tucss;
u8 tucso;
__le16 tucse;
__le32 cmd_type_len;
uint8_t status;
uint8_t hdr_len;
u8 status;
u8 hdr_len;
__le16 mss;
};
@ -637,33 +637,33 @@ struct ixgb_context_desc {
/* This structure takes a 64k flash and maps it for identification commands */
struct ixgb_flash_buffer {
uint8_t manufacturer_id;
uint8_t device_id;
uint8_t filler1[0x2AA8];
uint8_t cmd2;
uint8_t filler2[0x2AAA];
uint8_t cmd1;
uint8_t filler3[0xAAAA];
u8 manufacturer_id;
u8 device_id;
u8 filler1[0x2AA8];
u8 cmd2;
u8 filler2[0x2AAA];
u8 cmd1;
u8 filler3[0xAAAA];
};
/*
* This is a little-endian specific check.
*/
#define IS_MULTICAST(Address) \
(boolean_t)(((uint8_t *)(Address))[0] & ((uint8_t)0x01))
(bool)(((u8 *)(Address))[0] & ((u8)0x01))
/*
* Check whether an address is broadcast.
*/
#define IS_BROADCAST(Address) \
((((uint8_t *)(Address))[0] == ((uint8_t)0xff)) && (((uint8_t *)(Address))[1] == ((uint8_t)0xff)))
((((u8 *)(Address))[0] == ((u8)0xff)) && (((u8 *)(Address))[1] == ((u8)0xff)))
/* Flow control parameters */
struct ixgb_fc {
uint32_t high_water; /* Flow Control High-water */
uint32_t low_water; /* Flow Control Low-water */
uint16_t pause_time; /* Flow Control Pause timer */
boolean_t send_xon; /* Flow control send XON */
u32 high_water; /* Flow Control High-water */
u32 low_water; /* Flow Control Low-water */
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
ixgb_fc_type type; /* Type of flow control */
};
@ -685,139 +685,139 @@ struct ixgb_bus {
};
struct ixgb_hw {
uint8_t __iomem *hw_addr;/* Base Address of the hardware */
u8 __iomem *hw_addr;/* Base Address of the hardware */
void *back; /* Pointer to OS-dependent struct */
struct ixgb_fc fc; /* Flow control parameters */
struct ixgb_bus bus; /* Bus parameters */
uint32_t phy_id; /* Phy Identifier */
uint32_t phy_addr; /* XGMII address of Phy */
u32 phy_id; /* Phy Identifier */
u32 phy_addr; /* XGMII address of Phy */
ixgb_mac_type mac_type; /* Identifier for MAC controller */
ixgb_phy_type phy_type; /* Transceiver/phy identifier */
uint32_t max_frame_size; /* Maximum frame size supported */
uint32_t mc_filter_type; /* Multicast filter hash type */
uint32_t num_mc_addrs; /* Number of current Multicast addrs */
uint8_t curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */
uint32_t num_tx_desc; /* Number of Transmit descriptors */
uint32_t num_rx_desc; /* Number of Receive descriptors */
uint32_t rx_buffer_size; /* Size of Receive buffer */
boolean_t link_up; /* TRUE if link is valid */
boolean_t adapter_stopped; /* State of adapter */
uint16_t device_id; /* device id from PCI configuration space */
uint16_t vendor_id; /* vendor id from PCI configuration space */
uint8_t revision_id; /* revision id from PCI configuration space */
uint16_t subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */
uint16_t subsystem_id; /* subsystem id from PCI configuration space */
uint32_t bar0; /* Base Address registers */
uint32_t bar1;
uint32_t bar2;
uint32_t bar3;
uint16_t pci_cmd_word; /* PCI command register id from PCI configuration space */
u32 max_frame_size; /* Maximum frame size supported */
u32 mc_filter_type; /* Multicast filter hash type */
u32 num_mc_addrs; /* Number of current Multicast addrs */
u8 curr_mac_addr[IXGB_ETH_LENGTH_OF_ADDRESS]; /* Individual address currently programmed in MAC */
u32 num_tx_desc; /* Number of Transmit descriptors */
u32 num_rx_desc; /* Number of Receive descriptors */
u32 rx_buffer_size; /* Size of Receive buffer */
bool link_up; /* true if link is valid */
bool adapter_stopped; /* State of adapter */
u16 device_id; /* device id from PCI configuration space */
u16 vendor_id; /* vendor id from PCI configuration space */
u8 revision_id; /* revision id from PCI configuration space */
u16 subsystem_vendor_id; /* subsystem vendor id from PCI configuration space */
u16 subsystem_id; /* subsystem id from PCI configuration space */
u32 bar0; /* Base Address registers */
u32 bar1;
u32 bar2;
u32 bar3;
u16 pci_cmd_word; /* PCI command register id from PCI configuration space */
__le16 eeprom[IXGB_EEPROM_SIZE]; /* EEPROM contents read at init time */
unsigned long io_base; /* Our I/O mapped location */
uint32_t lastLFC;
uint32_t lastRFC;
u32 lastLFC;
u32 lastRFC;
};
/* Statistics reported by the hardware */
struct ixgb_hw_stats {
uint64_t tprl;
uint64_t tprh;
uint64_t gprcl;
uint64_t gprch;
uint64_t bprcl;
uint64_t bprch;
uint64_t mprcl;
uint64_t mprch;
uint64_t uprcl;
uint64_t uprch;
uint64_t vprcl;
uint64_t vprch;
uint64_t jprcl;
uint64_t jprch;
uint64_t gorcl;
uint64_t gorch;
uint64_t torl;
uint64_t torh;
uint64_t rnbc;
uint64_t ruc;
uint64_t roc;
uint64_t rlec;
uint64_t crcerrs;
uint64_t icbc;
uint64_t ecbc;
uint64_t mpc;
uint64_t tptl;
uint64_t tpth;
uint64_t gptcl;
uint64_t gptch;
uint64_t bptcl;
uint64_t bptch;
uint64_t mptcl;
uint64_t mptch;
uint64_t uptcl;
uint64_t uptch;
uint64_t vptcl;
uint64_t vptch;
uint64_t jptcl;
uint64_t jptch;
uint64_t gotcl;
uint64_t gotch;
uint64_t totl;
uint64_t toth;
uint64_t dc;
uint64_t plt64c;
uint64_t tsctc;
uint64_t tsctfc;
uint64_t ibic;
uint64_t rfc;
uint64_t lfc;
uint64_t pfrc;
uint64_t pftc;
uint64_t mcfrc;
uint64_t mcftc;
uint64_t xonrxc;
uint64_t xontxc;
uint64_t xoffrxc;
uint64_t xofftxc;
uint64_t rjc;
u64 tprl;
u64 tprh;
u64 gprcl;
u64 gprch;
u64 bprcl;
u64 bprch;
u64 mprcl;
u64 mprch;
u64 uprcl;
u64 uprch;
u64 vprcl;
u64 vprch;
u64 jprcl;
u64 jprch;
u64 gorcl;
u64 gorch;
u64 torl;
u64 torh;
u64 rnbc;
u64 ruc;
u64 roc;
u64 rlec;
u64 crcerrs;
u64 icbc;
u64 ecbc;
u64 mpc;
u64 tptl;
u64 tpth;
u64 gptcl;
u64 gptch;
u64 bptcl;
u64 bptch;
u64 mptcl;
u64 mptch;
u64 uptcl;
u64 uptch;
u64 vptcl;
u64 vptch;
u64 jptcl;
u64 jptch;
u64 gotcl;
u64 gotch;
u64 totl;
u64 toth;
u64 dc;
u64 plt64c;
u64 tsctc;
u64 tsctfc;
u64 ibic;
u64 rfc;
u64 lfc;
u64 pfrc;
u64 pftc;
u64 mcfrc;
u64 mcftc;
u64 xonrxc;
u64 xontxc;
u64 xoffrxc;
u64 xofftxc;
u64 rjc;
};
/* Function Prototypes */
extern boolean_t ixgb_adapter_stop(struct ixgb_hw *hw);
extern boolean_t ixgb_init_hw(struct ixgb_hw *hw);
extern boolean_t ixgb_adapter_start(struct ixgb_hw *hw);
extern bool ixgb_adapter_stop(struct ixgb_hw *hw);
extern bool ixgb_init_hw(struct ixgb_hw *hw);
extern bool ixgb_adapter_start(struct ixgb_hw *hw);
extern void ixgb_check_for_link(struct ixgb_hw *hw);
extern boolean_t ixgb_check_for_bad_link(struct ixgb_hw *hw);
extern bool ixgb_check_for_bad_link(struct ixgb_hw *hw);
extern void ixgb_rar_set(struct ixgb_hw *hw,
uint8_t *addr,
uint32_t index);
u8 *addr,
u32 index);
/* Filters (multicast, vlan, receive) */
extern void ixgb_mc_addr_list_update(struct ixgb_hw *hw,
uint8_t *mc_addr_list,
uint32_t mc_addr_count,
uint32_t pad);
u8 *mc_addr_list,
u32 mc_addr_count,
u32 pad);
/* Vfta functions */
extern void ixgb_write_vfta(struct ixgb_hw *hw,
uint32_t offset,
uint32_t value);
u32 offset,
u32 value);
/* Access functions to eeprom data */
void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, uint8_t *mac_addr);
uint32_t ixgb_get_ee_pba_number(struct ixgb_hw *hw);
uint16_t ixgb_get_ee_device_id(struct ixgb_hw *hw);
boolean_t ixgb_get_eeprom_data(struct ixgb_hw *hw);
__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, uint16_t index);
void ixgb_get_ee_mac_addr(struct ixgb_hw *hw, u8 *mac_addr);
u32 ixgb_get_ee_pba_number(struct ixgb_hw *hw);
u16 ixgb_get_ee_device_id(struct ixgb_hw *hw);
bool ixgb_get_eeprom_data(struct ixgb_hw *hw);
__le16 ixgb_get_eeprom_word(struct ixgb_hw *hw, u16 index);
/* Everything else */
void ixgb_led_on(struct ixgb_hw *hw);
void ixgb_led_off(struct ixgb_hw *hw);
void ixgb_write_pci_cfg(struct ixgb_hw *hw,
uint32_t reg,
uint16_t * value);
u32 reg,
u16 * value);
#endif /* _IXGB_HW_H_ */

View file

@ -67,7 +67,7 @@ MODULE_DEVICE_TABLE(pci, ixgb_pci_tbl);
/* Local Function Prototypes */
int ixgb_up(struct ixgb_adapter *adapter);
void ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog);
void ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog);
void ixgb_reset(struct ixgb_adapter *adapter);
int ixgb_setup_tx_resources(struct ixgb_adapter *adapter);
int ixgb_setup_rx_resources(struct ixgb_adapter *adapter);
@ -94,22 +94,22 @@ static struct net_device_stats *ixgb_get_stats(struct net_device *netdev);
static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data);
static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
static bool ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
#ifdef CONFIG_IXGB_NAPI
static int ixgb_clean(struct napi_struct *napi, int budget);
static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
int *work_done, int work_to_do);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
int *work_done, int work_to_do);
#else
static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
static bool ixgb_clean_rx_irq(struct ixgb_adapter *adapter);
#endif
static void ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter);
static void ixgb_tx_timeout(struct net_device *dev);
static void ixgb_tx_timeout_task(struct work_struct *work);
static void ixgb_vlan_rx_register(struct net_device *netdev,
struct vlan_group *grp);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
#ifdef CONFIG_NET_POLL_CONTROLLER
@ -197,7 +197,6 @@ module_exit(ixgb_exit_module);
static void
ixgb_irq_disable(struct ixgb_adapter *adapter)
{
atomic_inc(&adapter->irq_sem);
IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
IXGB_WRITE_FLUSH(&adapter->hw);
synchronize_irq(adapter->pdev->irq);
@ -211,14 +210,12 @@ ixgb_irq_disable(struct ixgb_adapter *adapter)
static void
ixgb_irq_enable(struct ixgb_adapter *adapter)
{
if(atomic_dec_and_test(&adapter->irq_sem)) {
u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
IXGB_INT_TXDW | IXGB_INT_LSC;
if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
val |= IXGB_INT_GPI0;
IXGB_WRITE_REG(&adapter->hw, IMS, val);
IXGB_WRITE_FLUSH(&adapter->hw);
}
u32 val = IXGB_INT_RXT0 | IXGB_INT_RXDMT0 |
IXGB_INT_TXDW | IXGB_INT_LSC;
if (adapter->hw.subsystem_vendor_id == SUN_SUBVENDOR_ID)
val |= IXGB_INT_GPI0;
IXGB_WRITE_REG(&adapter->hw, IMS, val);
IXGB_WRITE_FLUSH(&adapter->hw);
}
int
@ -274,7 +271,7 @@ ixgb_up(struct ixgb_adapter *adapter)
if(hw->max_frame_size >
IXGB_MAX_ENET_FRAME_SIZE_WITHOUT_FCS + ENET_FCS_LENGTH) {
uint32_t ctrl0 = IXGB_READ_REG(hw, CTRL0);
u32 ctrl0 = IXGB_READ_REG(hw, CTRL0);
if(!(ctrl0 & IXGB_CTRL0_JFE)) {
ctrl0 |= IXGB_CTRL0_JFE;
@ -283,26 +280,30 @@ ixgb_up(struct ixgb_adapter *adapter)
}
}
mod_timer(&adapter->watchdog_timer, jiffies);
clear_bit(__IXGB_DOWN, &adapter->flags);
#ifdef CONFIG_IXGB_NAPI
napi_enable(&adapter->napi);
#endif
ixgb_irq_enable(adapter);
mod_timer(&adapter->watchdog_timer, jiffies);
return 0;
}
void
ixgb_down(struct ixgb_adapter *adapter, boolean_t kill_watchdog)
ixgb_down(struct ixgb_adapter *adapter, bool kill_watchdog)
{
struct net_device *netdev = adapter->netdev;
/* prevent the interrupt handler from restarting watchdog */
set_bit(__IXGB_DOWN, &adapter->flags);
#ifdef CONFIG_IXGB_NAPI
napi_disable(&adapter->napi);
atomic_set(&adapter->irq_sem, 0);
#endif
/* waiting for NAPI to complete can re-enable interrupts */
ixgb_irq_disable(adapter);
free_irq(adapter->pdev->irq, netdev);
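
The ixgb hunks around here drop the irq_sem reference count: interrupt masking becomes unconditional, and a __IXGB_DOWN bit in adapter->flags, set at the top of ixgb_down() and again at init, is what keeps asynchronous paths such as the watchdog from re-arming work while the interface is being torn down. The corresponding test_bit() checks sit elsewhere in the patch; they presumably follow the usual pattern, roughly (a sketch, not the patch's code):

/* Sketch of how callers are expected to consult the new flag; the exact
 * call sites and the timer interval are assumptions, not patch lines. */
static void ixgb_watchdog(unsigned long data)
{
	struct ixgb_adapter *adapter = (struct ixgb_adapter *)data;

	/* ... link and statistics work ... */

	/* only re-arm if the interface is not being brought down */
	if (!test_bit(__IXGB_DOWN, &adapter->flags))
		mod_timer(&adapter->watchdog_timer, jiffies + HZ);
}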
@ -589,9 +590,9 @@ ixgb_sw_init(struct ixgb_adapter *adapter)
/* enable flow control to be programmed */
hw->fc.send_xon = 1;
atomic_set(&adapter->irq_sem, 1);
spin_lock_init(&adapter->tx_lock);
set_bit(__IXGB_DOWN, &adapter->flags);
return 0;
}
@ -656,7 +657,7 @@ ixgb_close(struct net_device *netdev)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
ixgb_down(adapter, TRUE);
ixgb_down(adapter, true);
ixgb_free_tx_resources(adapter);
ixgb_free_rx_resources(adapter);
@ -717,9 +718,9 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
static void
ixgb_configure_tx(struct ixgb_adapter *adapter)
{
uint64_t tdba = adapter->tx_ring.dma;
uint32_t tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
uint32_t tctl;
u64 tdba = adapter->tx_ring.dma;
u32 tdlen = adapter->tx_ring.count * sizeof(struct ixgb_tx_desc);
u32 tctl;
struct ixgb_hw *hw = &adapter->hw;
/* Setup the Base and Length of the Tx Descriptor Ring
@ -805,7 +806,7 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
static void
ixgb_setup_rctl(struct ixgb_adapter *adapter)
{
uint32_t rctl;
u32 rctl;
rctl = IXGB_READ_REG(&adapter->hw, RCTL);
@ -840,12 +841,12 @@ ixgb_setup_rctl(struct ixgb_adapter *adapter)
static void
ixgb_configure_rx(struct ixgb_adapter *adapter)
{
uint64_t rdba = adapter->rx_ring.dma;
uint32_t rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
u64 rdba = adapter->rx_ring.dma;
u32 rdlen = adapter->rx_ring.count * sizeof(struct ixgb_rx_desc);
struct ixgb_hw *hw = &adapter->hw;
uint32_t rctl;
uint32_t rxcsum;
uint32_t rxdctl;
u32 rctl;
u32 rxcsum;
u32 rxdctl;
/* make sure receives are disabled while setting up the descriptors */
@ -881,7 +882,7 @@ ixgb_configure_rx(struct ixgb_adapter *adapter)
IXGB_WRITE_REG(hw, RXDCTL, rxdctl);
/* Enable Receive Checksum Offload for TCP and UDP */
if(adapter->rx_csum == TRUE) {
if (adapter->rx_csum) {
rxcsum = IXGB_READ_REG(hw, RXCSUM);
rxcsum |= IXGB_RXCSUM_TUOFL;
IXGB_WRITE_REG(hw, RXCSUM, rxcsum);
@ -1078,7 +1079,7 @@ ixgb_set_multi(struct net_device *netdev)
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
struct dev_mc_list *mc_ptr;
uint32_t rctl;
u32 rctl;
int i;
/* Check for Promiscuous and All Multicast modes */
@ -1098,7 +1099,7 @@ ixgb_set_multi(struct net_device *netdev)
rctl |= IXGB_RCTL_MPE;
IXGB_WRITE_REG(hw, RCTL, rctl);
} else {
uint8_t mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES *
u8 mta[IXGB_MAX_NUM_MULTICAST_ADDRESSES *
IXGB_ETH_LENGTH_OF_ADDRESS];
IXGB_WRITE_REG(hw, RCTL, rctl);
@ -1164,7 +1165,7 @@ ixgb_watchdog(unsigned long data)
}
/* Force detection of hung controller every watchdog period */
adapter->detect_tx_hung = TRUE;
adapter->detect_tx_hung = true;
/* generate an interrupt to force clean up of any stragglers */
IXGB_WRITE_REG(&adapter->hw, ICS, IXGB_INT_TXDW);
@ -1182,8 +1183,8 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
struct ixgb_context_desc *context_desc;
unsigned int i;
uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
uint16_t ipcse, tucse, mss;
u8 ipcss, ipcso, tucss, tucso, hdr_len;
u16 ipcse, tucse, mss;
int err;
if (likely(skb_is_gso(skb))) {
@ -1243,12 +1244,12 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
return 0;
}
static boolean_t
static bool
ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
{
struct ixgb_context_desc *context_desc;
unsigned int i;
uint8_t css, cso;
u8 css, cso;
if(likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
struct ixgb_buffer *buffer_info;
@ -1264,7 +1265,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
context_desc->tucso = cso;
context_desc->tucse = 0;
/* zero out any previously existing data in one instruction */
*(uint32_t *)&(context_desc->ipcss) = 0;
*(u32 *)&(context_desc->ipcss) = 0;
context_desc->status = 0;
context_desc->hdr_len = 0;
context_desc->mss = 0;
@ -1275,10 +1276,10 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
if(++i == adapter->tx_ring.count) i = 0;
adapter->tx_ring.next_to_use = i;
return TRUE;
return true;
}
return FALSE;
return false;
}
#define IXGB_MAX_TXD_PWR 14
@ -1371,9 +1372,9 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
struct ixgb_tx_desc *tx_desc = NULL;
struct ixgb_buffer *buffer_info;
uint32_t cmd_type_len = adapter->tx_cmd_type;
uint8_t status = 0;
uint8_t popts = 0;
u32 cmd_type_len = adapter->tx_cmd_type;
u8 status = 0;
u8 popts = 0;
unsigned int i;
if(tx_flags & IXGB_TX_FLAGS_TSO) {
@ -1464,14 +1465,18 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
int vlan_id = 0;
int tso;
if (test_bit(__IXGB_DOWN, &adapter->flags)) {
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
if(skb->len <= 0) {
dev_kfree_skb_any(skb);
return 0;
}
#ifdef NETIF_F_LLTX
local_irq_save(flags);
if (!spin_trylock(&adapter->tx_lock)) {
if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
/* Collision - tell upper layer to requeue */
local_irq_restore(flags);
return NETDEV_TX_LOCKED;
@ -1548,7 +1553,7 @@ ixgb_tx_timeout_task(struct work_struct *work)
container_of(work, struct ixgb_adapter, tx_timeout_task);
adapter->tx_timeout_count++;
ixgb_down(adapter, TRUE);
ixgb_down(adapter, true);
ixgb_up(adapter);
}
@ -1595,7 +1600,7 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
netdev->mtu = new_mtu;
if ((old_max_frame != max_frame) && netif_running(netdev)) {
ixgb_down(adapter, TRUE);
ixgb_down(adapter, true);
ixgb_up(adapter);
}
@ -1745,7 +1750,7 @@ ixgb_intr(int irq, void *data)
struct net_device *netdev = data;
struct ixgb_adapter *adapter = netdev_priv(netdev);
struct ixgb_hw *hw = &adapter->hw;
uint32_t icr = IXGB_READ_REG(hw, ICR);
u32 icr = IXGB_READ_REG(hw, ICR);
#ifndef CONFIG_IXGB_NAPI
unsigned int i;
#endif
@ -1753,9 +1758,9 @@ ixgb_intr(int irq, void *data)
if(unlikely(!icr))
return IRQ_NONE; /* Not our interrupt */
if(unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC))) {
mod_timer(&adapter->watchdog_timer, jiffies);
}
if (unlikely(icr & (IXGB_INT_RXSEQ | IXGB_INT_LSC)))
if (!test_bit(__IXGB_DOWN, &adapter->flags))
mod_timer(&adapter->watchdog_timer, jiffies);
#ifdef CONFIG_IXGB_NAPI
if (netif_rx_schedule_prep(netdev, &adapter->napi)) {
@ -1764,7 +1769,6 @@ ixgb_intr(int irq, void *data)
of the posted write is intentionally left out.
*/
atomic_inc(&adapter->irq_sem);
IXGB_WRITE_REG(&adapter->hw, IMC, ~0);
__netif_rx_schedule(netdev, &adapter->napi);
}
@ -1812,7 +1816,7 @@ ixgb_clean(struct napi_struct *napi, int budget)
* @adapter: board private structure
**/
static boolean_t
static bool
ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
{
struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
@ -1820,7 +1824,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
struct ixgb_tx_desc *tx_desc, *eop_desc;
struct ixgb_buffer *buffer_info;
unsigned int i, eop;
boolean_t cleaned = FALSE;
bool cleaned = false;
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
@ -1828,7 +1832,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
while(eop_desc->status & IXGB_TX_DESC_STATUS_DD) {
for(cleaned = FALSE; !cleaned; ) {
for (cleaned = false; !cleaned; ) {
tx_desc = IXGB_TX_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
@ -1839,7 +1843,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
ixgb_unmap_and_free_tx_resource(adapter, buffer_info);
*(uint32_t *)&(tx_desc->status) = 0;
*(u32 *)&(tx_desc->status) = 0;
cleaned = (i == eop);
if(++i == tx_ring->count) i = 0;
@ -1862,7 +1866,7 @@ ixgb_clean_tx_irq(struct ixgb_adapter *adapter)
if(adapter->detect_tx_hung) {
/* detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */
adapter->detect_tx_hung = FALSE;
adapter->detect_tx_hung = false;
if (tx_ring->buffer_info[eop].dma &&
time_after(jiffies, tx_ring->buffer_info[eop].time_stamp + HZ)
&& !(IXGB_READ_REG(&adapter->hw, STATUS) &
@ -1932,7 +1936,7 @@ ixgb_rx_checksum(struct ixgb_adapter *adapter,
* @adapter: board private structure
**/
static boolean_t
static bool
#ifdef CONFIG_IXGB_NAPI
ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
#else
@ -1944,9 +1948,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct ixgb_rx_desc *rx_desc, *next_rxd;
struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
uint32_t length;
u32 length;
unsigned int i, j;
boolean_t cleaned = FALSE;
bool cleaned = false;
i = rx_ring->next_to_clean;
rx_desc = IXGB_RX_DESC(*rx_ring, i);
@ -1980,7 +1984,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
next_skb = next_buffer->skb;
prefetch(next_skb);
cleaned = TRUE;
cleaned = true;
pci_unmap_single(pdev,
buffer_info->dma,
@ -2162,7 +2166,7 @@ static void
ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
uint32_t ctrl, rctl;
u32 ctrl, rctl;
ixgb_irq_disable(adapter);
adapter->vlgrp = grp;
@ -2193,14 +2197,16 @@ ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
IXGB_WRITE_REG(&adapter->hw, RCTL, rctl);
}
ixgb_irq_enable(adapter);
/* don't enable interrupts unless we are UP */
if (adapter->netdev->flags & IFF_UP)
ixgb_irq_enable(adapter);
}
static void
ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
uint32_t vfta, index;
u32 vfta, index;
/* add VID to filter table */
@ -2211,18 +2217,20 @@ ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid)
}
static void
ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
ixgb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
{
struct ixgb_adapter *adapter = netdev_priv(netdev);
uint32_t vfta, index;
u32 vfta, index;
ixgb_irq_disable(adapter);
vlan_group_set_device(adapter->vlgrp, vid, NULL);
ixgb_irq_enable(adapter);
/* don't enable interrupts unless we are UP */
if (adapter->netdev->flags & IFF_UP)
ixgb_irq_enable(adapter);
/* remove VID from filter table*/
/* remove VID from filter table */
index = (vid >> 5) & 0x7F;
vfta = IXGB_READ_REG_ARRAY(&adapter->hw, VFTA, index);
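
The index arithmetic in the hunk above deserves a word: the VLAN filter table (VFTA) is a 4096-bit bitmap held in 128 32-bit registers, so bits 11:5 of the VLAN ID select the register and, in driver code not shown in this hunk, the low five bits select the bit inside it. A standalone sketch of that mapping follows; the array-backed helper and its names are hypothetical, and the real driver reads and writes the hardware VFTA registers instead of an in-memory copy.

/* Illustrative sketch, not part of the patch: the same VID-to-bit mapping
 * applied to an in-memory copy of the 4096-entry filter table. */
#include <stdint.h>
#include <stdio.h>

#define VFTA_WORDS 128			/* 128 words * 32 bits = 4096 VIDs */

static void vfta_set(uint32_t vfta[VFTA_WORDS], uint16_t vid, int enable)
{
	uint32_t index = (vid >> 5) & 0x7F;	/* which 32-bit word */
	uint32_t bit = vid & 0x1F;		/* which bit in that word (assumed) */

	if (enable)
		vfta[index] |= 1u << bit;
	else
		vfta[index] &= ~(1u << bit);
}

int main(void)
{
	uint32_t vfta[VFTA_WORDS] = { 0 };

	vfta_set(vfta, 100, 1);			/* VID 100 lands in word 3, bit 4 */
	printf("word 3 = %#010x\n", vfta[3]);
	return 0;
}
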
@ -2236,7 +2244,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
ixgb_vlan_rx_register(adapter->netdev, adapter->vlgrp);
if(adapter->vlgrp) {
uint16_t vid;
u16 vid;
for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
if(!vlan_group_get_device(adapter->vlgrp, vid))
continue;
@ -2277,7 +2285,7 @@ static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
struct ixgb_adapter *adapter = netdev_priv(netdev);
if(netif_running(netdev))
ixgb_down(adapter, TRUE);
ixgb_down(adapter, true);
pci_disable_device(pdev);

View file

@ -39,13 +39,6 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
typedef enum {
#undef FALSE
FALSE = 0,
#undef TRUE
TRUE = 1
} boolean_t;
#undef ASSERT
#define ASSERT(x) if(!(x)) BUG()
#define MSGOUT(S, A, B) printk(KERN_DEBUG S "\n", A, B)

View file

@ -36,6 +36,9 @@
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#ifdef CONFIG_DCA
#include <linux/dca.h>
#endif
#define IXGBE_ERR(args...) printk(KERN_ERR "ixgbe: " args)
@ -120,7 +123,6 @@ struct ixgbe_queue_stats {
};
struct ixgbe_ring {
struct ixgbe_adapter *adapter; /* backlink */
void *desc; /* descriptor ring memory */
dma_addr_t dma; /* phys. address of descriptor ring */
unsigned int size; /* length in bytes */
@ -128,6 +130,7 @@ struct ixgbe_ring {
unsigned int next_to_use;
unsigned int next_to_clean;
int queue_index; /* needed for multiqueue queue management */
union {
struct ixgbe_tx_buffer *tx_buffer_info;
struct ixgbe_rx_buffer *rx_buffer_info;
@ -136,8 +139,21 @@ struct ixgbe_ring {
u16 head;
u16 tail;
unsigned int total_bytes;
unsigned int total_packets;
u16 reg_idx; /* holds the special value that gets the hardware register
* offset associated with this ring, which is different
* for DCE and RSS modes */
#ifdef CONFIG_DCA
/* cpu for tx queue */
int cpu;
#endif
struct ixgbe_queue_stats stats;
u8 v_idx; /* maps directly to the index for this ring in the hardware
* vector array, can also be used for finding the bit in EICR
* and friends that represents the vector for this ring */
u32 eims_value;
u16 itr_register;
@ -146,6 +162,33 @@ struct ixgbe_ring {
u16 work_limit; /* max work per interrupt */
};
#define RING_F_VMDQ 1
#define RING_F_RSS 2
#define IXGBE_MAX_RSS_INDICES 16
#define IXGBE_MAX_VMDQ_INDICES 16
struct ixgbe_ring_feature {
int indices;
int mask;
};
#define MAX_RX_QUEUES 64
#define MAX_TX_QUEUES 32
/* MAX_MSIX_Q_VECTORS of these are allocated,
* but we only use one per queue-specific vector.
*/
struct ixgbe_q_vector {
struct ixgbe_adapter *adapter;
struct napi_struct napi;
DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
u8 rxr_count; /* Rx ring count assigned to this vector */
u8 txr_count; /* Tx ring count assigned to this vector */
u8 tx_eitr;
u8 rx_eitr;
u32 eitr;
};
/* Helper macros to switch between ints/sec and what the register uses.
* And yes, it's the same math going both ways.
*/
@ -166,6 +209,14 @@ struct ixgbe_ring {
#define IXGBE_MAX_JUMBO_FRAME_SIZE 16128
#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)
#define MAX_MSIX_Q_VECTORS 16
#define MIN_MSIX_Q_VECTORS 2
#define MAX_MSIX_COUNT (MAX_MSIX_Q_VECTORS + NON_Q_VECTORS)
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)
/* board specific private data structure */
struct ixgbe_adapter {
struct timer_list watchdog_timer;
@ -173,10 +224,16 @@ struct ixgbe_adapter {
u16 bd_number;
u16 rx_buf_len;
struct work_struct reset_task;
struct ixgbe_q_vector q_vector[MAX_MSIX_Q_VECTORS];
char name[MAX_MSIX_COUNT][IFNAMSIZ + 5];
/* Interrupt Throttle Rate */
u32 itr_setting;
u16 eitr_low;
u16 eitr_high;
/* TX */
struct ixgbe_ring *tx_ring; /* One per active queue */
struct napi_struct napi;
u64 restart_queue;
u64 lsc_int;
u64 hw_tso_ctxt;
@ -192,22 +249,27 @@ struct ixgbe_adapter {
u64 non_eop_descs;
int num_tx_queues;
int num_rx_queues;
int num_msix_vectors;
struct ixgbe_ring_feature ring_feature[3];
struct msix_entry *msix_entries;
u64 rx_hdr_split;
u32 alloc_rx_page_failed;
u32 alloc_rx_buff_failed;
/* Some features need tri-state capability,
* thus the additional *_CAPABLE flags.
*/
u32 flags;
#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1)
#define IXGBE_FLAG_RX_CSUM_ENABLED (u32)(1 << 0)
#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1)
#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 2)
#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
/* Interrupt Throttle Rate */
u32 rx_eitr;
u32 tx_eitr;
#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 2)
#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 3)
#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 4)
#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 5)
#define IXGBE_FLAG_RSS_ENABLED (u32)(1 << 6)
#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 7)
#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8)
/* OS defined structs */
struct net_device *netdev;
@ -218,7 +280,10 @@ struct ixgbe_adapter {
struct ixgbe_hw hw;
u16 msg_enable;
struct ixgbe_hw_stats stats;
char lsc_name[IFNAMSIZ + 5];
/* Interrupt Throttle Rate */
u32 rx_eitr;
u32 tx_eitr;
unsigned long state;
u64 tx_busy;

View file

@ -246,13 +246,26 @@ static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
static int ixgbe_set_tso(struct net_device *netdev, u32 data)
{
if (data) {
netdev->features |= NETIF_F_TSO;
netdev->features |= NETIF_F_TSO6;
} else {
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int i;
#endif
netif_stop_queue(netdev);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
for (i = 0; i < adapter->num_tx_queues; i++)
netif_stop_subqueue(netdev, i);
#endif
netdev->features &= ~NETIF_F_TSO;
netdev->features &= ~NETIF_F_TSO6;
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
for (i = 0; i < adapter->num_tx_queues; i++)
netif_start_subqueue(netdev, i);
#endif
netif_start_queue(netdev);
}
return 0;
}
@ -873,13 +886,13 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if (adapter->rx_eitr == 0)
ec->rx_coalesce_usecs = 0;
if (adapter->rx_eitr < IXGBE_MIN_ITR_USECS)
ec->rx_coalesce_usecs = adapter->rx_eitr;
else
ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr;
if (adapter->tx_eitr == 0)
ec->tx_coalesce_usecs = 0;
if (adapter->tx_eitr < IXGBE_MIN_ITR_USECS)
ec->tx_coalesce_usecs = adapter->tx_eitr;
else
ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr;
@ -893,22 +906,26 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
struct ixgbe_adapter *adapter = netdev_priv(netdev);
if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
((ec->rx_coalesce_usecs > 0) &&
((ec->rx_coalesce_usecs != 0) &&
(ec->rx_coalesce_usecs != 1) &&
(ec->rx_coalesce_usecs != 3) &&
(ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
return -EINVAL;
if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
((ec->tx_coalesce_usecs > 0) &&
((ec->tx_coalesce_usecs != 0) &&
(ec->tx_coalesce_usecs != 1) &&
(ec->tx_coalesce_usecs != 3) &&
(ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
return -EINVAL;
/* convert to rate of irq's per second */
if (ec->rx_coalesce_usecs == 0)
adapter->rx_eitr = 0;
if (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
adapter->rx_eitr = ec->rx_coalesce_usecs;
else
adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs);
if (ec->tx_coalesce_usecs == 0)
adapter->tx_eitr = 0;
if (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)
adapter->tx_eitr = ec->rx_coalesce_usecs;
else
adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs);
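
One detail of the coalescing hunks above that is easy to miss: values below IXGBE_MIN_ITR_USECS (which includes the special settings 0, 1 and 3) are stored verbatim, while anything larger is converted between microseconds per interrupt and interrupts per second with 1000000/x, a conversion that is effectively its own inverse, so get_coalesce can use the same expression to report the value back. Below is a small self-contained sketch of that round trip; the threshold constant is a stand-in for the example, not the driver's actual value.

/* Illustrative sketch, not part of the patch: round-tripping an ethtool
 * coalescing value through the usecs <-> interrupts-per-second form. */
#include <stdio.h>

#define EXAMPLE_MIN_ITR_USECS 10	/* stand-in for IXGBE_MIN_ITR_USECS */

static unsigned int usecs_to_rate(unsigned int usecs)
{
	if (usecs < EXAMPLE_MIN_ITR_USECS)
		return usecs;			/* special setting, kept as-is */
	return 1000000 / usecs;			/* interrupts per second */
}

static unsigned int rate_to_usecs(unsigned int rate)
{
	if (rate < EXAMPLE_MIN_ITR_USECS)
		return rate;			/* special setting, kept as-is */
	return 1000000 / rate;			/* back to microseconds */
}

int main(void)
{
	unsigned int rate = usecs_to_rate(125);	/* 125 us -> 8000 irq/s */

	printf("rate=%u irq/s, usecs=%u\n", rate, rate_to_usecs(rate));
	return 0;
}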

Diff not shown because the file is too large.

drivers/net/korina.c (new file, 1233 lines added)

Diff not shown because the file is too large.

View file

@ -258,7 +258,7 @@ static __net_init int loopback_net_init(struct net *net)
if (!dev)
goto out;
dev->nd_net = net;
dev_net_set(dev, net);
err = register_netdev(dev);
if (err)
goto out_free_netdev;

View file

@ -246,7 +246,7 @@ static int macb_mii_init(struct macb *bp)
bp->mii_bus.read = &macb_mdio_read;
bp->mii_bus.write = &macb_mdio_write;
bp->mii_bus.reset = &macb_mdio_reset;
bp->mii_bus.id = bp->pdev->id;
snprintf(bp->mii_bus.id, MII_BUS_ID_SIZE, "%x", bp->pdev->id);
bp->mii_bus.priv = bp;
bp->mii_bus.dev = &bp->dev->dev;
pdata = bp->pdev->dev.platform_data;

View file

@ -402,7 +402,7 @@ static int macvlan_newlink(struct net_device *dev,
if (!tb[IFLA_LINK])
return -EINVAL;
lowerdev = __dev_get_by_index(dev->nd_net, nla_get_u32(tb[IFLA_LINK]));
lowerdev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK]));
if (lowerdev == NULL)
return -ENODEV;

Diff not shown because the file is too large.

View file

@ -511,10 +511,10 @@ enum PhyCtrl_bits {
/* Note that using only 32 bit fields simplifies conversion to big-endian
architectures. */
struct netdev_desc {
u32 next_desc;
s32 cmd_status;
u32 addr;
u32 software_use;
__le32 next_desc;
__le32 cmd_status;
__le32 addr;
__le32 software_use;
};
/* Bits in network_desc.status */
@ -786,7 +786,8 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
struct netdev_private *np;
int i, option, irq, chip_idx = ent->driver_data;
static int find_cnt = -1;
unsigned long iostart, iosize;
resource_size_t iostart;
unsigned long iosize;
void __iomem *ioaddr;
const int pcibar = 1; /* PCI base address register */
int prev_eedata;
@ -946,10 +947,11 @@ static int __devinit natsemi_probe1 (struct pci_dev *pdev,
goto err_create_file;
if (netif_msg_drv(np)) {
printk(KERN_INFO "natsemi %s: %s at %#08lx "
printk(KERN_INFO "natsemi %s: %s at %#08llx "
"(%s), %s, IRQ %d",
dev->name, natsemi_pci_info[chip_idx].name, iostart,
pci_name(np->pci_dev), print_mac(mac, dev->dev_addr), irq);
dev->name, natsemi_pci_info[chip_idx].name,
(unsigned long long)iostart, pci_name(np->pci_dev),
print_mac(mac, dev->dev_addr), irq);
if (dev->if_port == PORT_TP)
printk(", port TP.\n");
else if (np->ignore_phy)
@ -2018,7 +2020,7 @@ static void drain_rx(struct net_device *dev)
/* Free all the skbuffs in the Rx queue. */
for (i = 0; i < RX_RING_SIZE; i++) {
np->rx_ring[i].cmd_status = 0;
np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (np->rx_skbuff[i]) {
pci_unmap_single(np->pci_dev,
np->rx_dma[i], buflen,
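
The natsemi hunks above make the descriptor layout explicit: the fields become __le32 and raw constants such as 0xBADF00D0 gain a cpu_to_le32() wrapper, because the ring is read by the device in little-endian order regardless of the host CPU, and the sparse annotation catches any access that forgets the conversion. Below is a rough kernel-style sketch of the idiom with an invented descriptor, not the driver's own structures.

/* Illustrative sketch, not part of the patch: an invented little-endian
 * DMA descriptor accessed through the cpu_to_le32()/le32_to_cpu() helpers. */
#include <linux/types.h>
#include <asm/byteorder.h>

struct example_desc {
	__le32 next_desc;
	__le32 cmd_status;
	__le32 addr;
};

static void example_fill(struct example_desc *desc, u32 buf_addr, u32 cmd)
{
	desc->addr = cpu_to_le32(buf_addr);		/* CPU -> device order */
	desc->cmd_status = cpu_to_le32(cmd);
}

static bool example_owned_by_cpu(const struct example_desc *desc, u32 own_bit)
{
	/* device -> CPU order before testing status bits */
	return (le32_to_cpu(desc->cmd_status) & own_bit) != 0;
}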

View file

@ -95,23 +95,6 @@
#define ADDR_IN_WINDOW1(off) \
((off > NETXEN_CRB_PCIX_HOST2) && (off < NETXEN_CRB_MAX)) ? 1 : 0
/*
* In netxen_nic_down(), we must wait for any pending callback requests into
* netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be
* reenabled right after it is deleted in netxen_nic_down(). FLUSH_SCHEDULED_WORK()
* does this synchronization.
*
* Normally, schedule_work()/flush_scheduled_work() could have worked, but
* netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
* call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
* subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
* linkwatch_event() to be executed which also attempts to acquire the rtnl
* lock thus causing a deadlock.
*/
#define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
#define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
extern struct workqueue_struct *netxen_workq;
/*
* normalize a 64MB crb address to 32MB PCI window
@ -1050,7 +1033,6 @@ void netxen_halt_pegs(struct netxen_adapter *adapter);
int netxen_rom_se(struct netxen_adapter *adapter, int addr);
/* Functions from netxen_nic_isr.c */
int netxen_nic_link_ok(struct netxen_adapter *adapter);
void netxen_initialize_adapter_sw(struct netxen_adapter *adapter);
void netxen_initialize_adapter_hw(struct netxen_adapter *adapter);
void *netxen_alloc(struct pci_dev *pdev, size_t sz, dma_addr_t * ptr,

View file

@ -172,6 +172,7 @@ void netxen_nic_gbe_handle_phy_intr(struct netxen_adapter *adapter)
netxen_nic_isr_other(adapter);
}
#if 0
int netxen_nic_link_ok(struct netxen_adapter *adapter)
{
switch (adapter->ahw.board_type) {
@ -189,6 +190,7 @@ int netxen_nic_link_ok(struct netxen_adapter *adapter)
return 0;
}
#endif /* 0 */
void netxen_nic_xgbe_handle_phy_intr(struct netxen_adapter *adapter)
{

View file

@ -86,7 +86,24 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
MODULE_DEVICE_TABLE(pci, netxen_pci_tbl);
struct workqueue_struct *netxen_workq;
/*
* In netxen_nic_down(), we must wait for any pending callback requests into
* netxen_watchdog_task() to complete; eg otherwise the watchdog_timer could be
* reenabled right after it is deleted in netxen_nic_down().
* FLUSH_SCHEDULED_WORK() does this synchronization.
*
* Normally, schedule_work()/flush_scheduled_work() could have worked, but
* netxen_nic_close() is invoked with kernel rtnl lock held. netif_carrier_off()
* call in netxen_nic_close() triggers a schedule_work(&linkwatch_work), and a
* subsequent call to flush_scheduled_work() in netxen_nic_down() would cause
* linkwatch_event() to be executed which also attempts to acquire the rtnl
* lock thus causing a deadlock.
*/
static struct workqueue_struct *netxen_workq;
#define SCHEDULE_WORK(tp) queue_work(netxen_workq, tp)
#define FLUSH_SCHEDULED_WORK() flush_workqueue(netxen_workq)
static void netxen_watchdog(unsigned long);
static void netxen_nic_update_cmd_producer(struct netxen_adapter *adapter,
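
The block comment relocated into this file spells out why the driver keeps its own workqueue: calling flush_scheduled_work() from a path that holds the rtnl lock can end up waiting on linkwatch_event(), which itself takes rtnl, so the driver flushes only a private queue that cannot contain such work. Below is a rough sketch of that pattern with made-up names, assuming a single watchdog-style task.

/* Illustrative sketch, not part of the patch: deferred work queued on a
 * driver-private workqueue so it can be flushed safely under rtnl_lock. */
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_task;

static void example_task_fn(struct work_struct *work)
{
	/* watchdog-style housekeeping runs here */
}

static int example_setup(void)
{
	example_wq = create_singlethread_workqueue("example");
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_task, example_task_fn);
	return 0;
}

static void example_kick(void)
{
	/* counterpart of SCHEDULE_WORK(): never uses the shared kevent queue */
	queue_work(example_wq, &example_task);
}

static void example_down(void)
{
	/* counterpart of FLUSH_SCHEDULED_WORK(): waits only for example_wq,
	 * so it cannot pick up linkwatch work and deadlock on rtnl */
	flush_workqueue(example_wq);
}

static void example_teardown(void)
{
	flush_workqueue(example_wq);
	destroy_workqueue(example_wq);
}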

Some files were not shown because too many files changed in this diff.