2005-04-17 06:20:36 +08:00
|
|
|
/* $Id: plip.c,v 1.3.6.2 1997/04/16 15:07:56 phil Exp $ */
|
|
|
|
/* PLIP: A parallel port "network" driver for Linux. */
|
|
|
|
/* This driver is for parallel port with 5-bit cable (LapLink (R) cable). */
|
|
|
|
/*
|
|
|
|
* Authors: Donald Becker <becker@scyld.com>
|
|
|
|
* Tommy Thorn <thorn@daimi.aau.dk>
|
|
|
|
* Tanabe Hiroyasu <hiro@sanpo.t.u-tokyo.ac.jp>
|
|
|
|
* Alan Cox <gw4pts@gw4pts.ampr.org>
|
|
|
|
* Peter Bauer <100136.3530@compuserve.com>
|
|
|
|
* Niibe Yutaka <gniibe@mri.co.jp>
|
|
|
|
* Nimrod Zimerman <zimerman@mailandnews.com>
|
|
|
|
*
|
|
|
|
* Enhancements:
|
|
|
|
* Modularization and ifreq/ifmap support by Alan Cox.
|
|
|
|
* Rewritten by Niibe Yutaka.
|
|
|
|
* parport-sharing awareness code by Philip Blundell.
|
|
|
|
* SMP locking by Niibe Yutaka.
|
|
|
|
* Support for parallel ports with no IRQ (poll mode),
|
2006-09-14 01:24:59 +08:00
|
|
|
* Modifications to use the parallel port API
|
2005-04-17 06:20:36 +08:00
|
|
|
* by Nimrod Zimerman.
|
|
|
|
*
|
|
|
|
* Fixes:
|
|
|
|
* Niibe Yutaka
|
|
|
|
* - Module initialization.
|
|
|
|
* - MTU fix.
|
|
|
|
* - Make sure other end is OK, before sending a packet.
|
|
|
|
* - Fix immediate timer problem.
|
|
|
|
*
|
|
|
|
* Al Viro
|
|
|
|
* - Changed {enable,disable}_irq handling to make it work
|
|
|
|
* with new ("stack") semantics.
|
|
|
|
*
|
|
|
|
* This program is free software; you can redistribute it and/or
|
|
|
|
* modify it under the terms of the GNU General Public License
|
|
|
|
* as published by the Free Software Foundation; either version
|
|
|
|
* 2 of the License, or (at your option) any later version.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Original version and the name 'PLIP' from Donald Becker <becker@scyld.com>
|
|
|
|
* inspired by Russ Nelson's parallel port packet driver.
|
|
|
|
*
|
|
|
|
* NOTE:
|
|
|
|
* Tanabe Hiroyasu had changed the protocol, and it was in Linux v1.0.
|
|
|
|
* Because of the necessity to communicate to DOS machines with the
|
|
|
|
* Crynwr packet driver, Peter Bauer changed the protocol again
|
|
|
|
* back to original protocol.
|
|
|
|
*
|
|
|
|
* This version follows original PLIP protocol.
|
|
|
|
* So, this PLIP can't communicate the PLIP of Linux v1.0.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/*
|
|
|
|
* To use with DOS box, please do (Turn on ARP switch):
|
|
|
|
* # ifconfig plip[0-2] arp
|
|
|
|
*/
|
|
|
|
static const char version[] = "NET3 PLIP version 2.4-parport gniibe@mri.co.jp\n";
|
|
|
|
|
|
|
|
/*
|
|
|
|
Sources:
|
|
|
|
Ideas and protocols came from Russ Nelson's <nelson@crynwr.com>
|
|
|
|
"parallel.asm" parallel port packet driver.
|
|
|
|
|
|
|
|
The "Crynwr" parallel port standard specifies the following protocol:
|
|
|
|
Trigger by sending nibble '0x8' (this causes interrupt on other end)
|
|
|
|
count-low octet
|
|
|
|
count-high octet
|
|
|
|
... data octets
|
|
|
|
checksum octet
|
|
|
|
Each octet is sent as <wait for rx. '0x1?'> <send 0x10+(octet&0x0F)>
|
|
|
|
<wait for rx. '0x0?'> <send 0x00+((octet>>4)&0x0F)>
|
|
|
|
|
|
|
|
The packet is encapsulated as if it were ethernet.
|
|
|
|
|
|
|
|
The cable used is a de facto standard parallel null cable -- sold as
|
|
|
|
a "LapLink" cable by various places. You'll need a 12-conductor cable to
|
|
|
|
make one yourself. The wiring is:
|
|
|
|
SLCTIN 17 - 17
|
|
|
|
GROUND 25 - 25
|
|
|
|
D0->ERROR 2 - 15 15 - 2
|
|
|
|
D1->SLCT 3 - 13 13 - 3
|
|
|
|
D2->PAPOUT 4 - 12 12 - 4
|
|
|
|
D3->ACK 5 - 10 10 - 5
|
|
|
|
D4->BUSY 6 - 11 11 - 6
|
|
|
|
Do not connect the other pins. They are
|
|
|
|
D5,D6,D7 are 7,8,9
|
|
|
|
STROBE is 1, FEED is 14, INIT is 16
|
|
|
|
extra grounds are 18,19,20,21,22,23,24
|
|
|
|
*/
|
|
|
|
|
|
|
|
#include <linux/module.h>
|
|
|
|
#include <linux/kernel.h>
|
|
|
|
#include <linux/types.h>
|
|
|
|
#include <linux/fcntl.h>
|
|
|
|
#include <linux/interrupt.h>
|
|
|
|
#include <linux/string.h>
|
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit slab.h inclusion from percpu.h
percpu.h is included by sched.h and module.h and thus ends up being
included when building most .c files. percpu.h includes slab.h which
in turn includes gfp.h making everything defined by the two files
universally available and complicating inclusion dependencies.
percpu.h -> slab.h dependency is about to be removed. Prepare for
this change by updating users of gfp and slab facilities include those
headers directly instead of assuming availability. As this conversion
needs to touch large number of source files, the following script is
used as the basis of conversion.
http://userweb.kernel.org/~tj/misc/slabh-sweep.py
The script does the followings.
* Scan files for gfp and slab usages and update includes such that
only the necessary includes are there. ie. if only gfp is used,
gfp.h, if slab is used, slab.h.
* When the script inserts a new include, it looks at the include
blocks and try to put the new include such that its order conforms
to its surrounding. It's put in the include block which contains
core kernel includes, in the same order that the rest are ordered -
alphabetical, Christmas tree, rev-Xmas-tree or at the end if there
doesn't seem to be any matching order.
* If the script can't find a place to put a new include (mostly
because the file doesn't have fitting include block), it prints out
an error message indicating which .h file needs to be added to the
file.
The conversion was done in the following steps.
1. The initial automatic conversion of all .c files updated slightly
over 4000 files, deleting around 700 includes and adding ~480 gfp.h
and ~3000 slab.h inclusions. The script emitted errors for ~400
files.
2. Each error was manually checked. Some didn't need the inclusion,
some needed manual addition while adding it to implementation .h or
embedding .c file was more appropriate for others. This step added
inclusions to around 150 files.
3. The script was run again and the output was compared to the edits
from #2 to make sure no file was left behind.
4. Several build tests were done and a couple of problems were fixed.
e.g. lib/decompress_*.c used malloc/free() wrappers around slab
APIs requiring slab.h to be added manually.
5. The script was run on all .h files but without automatically
editing them as sprinkling gfp.h and slab.h inclusions around .h
files could easily lead to inclusion dependency hell. Most gfp.h
inclusion directives were ignored as stuff from gfp.h was usually
wildly available and often used in preprocessor macros. Each
slab.h inclusion directive was examined and added manually as
necessary.
6. percpu.h was updated not to include slab.h.
7. Build test were done on the following configurations and failures
were fixed. CONFIG_GCOV_KERNEL was turned off for all tests (as my
distributed build env didn't work with gcov compiles) and a few
more options had to be turned off depending on archs to make things
build (like ipr on powerpc/64 which failed due to missing writeq).
* x86 and x86_64 UP and SMP allmodconfig and a custom test config.
* powerpc and powerpc64 SMP allmodconfig
* sparc and sparc64 SMP allmodconfig
* ia64 SMP allmodconfig
* s390 SMP allmodconfig
* alpha SMP allmodconfig
* um on x86_64 SMP allmodconfig
8. percpu.h modifications were reverted so that it could be applied as
a separate patch and serve as bisection point.
Given the fact that I had only a couple of failures from tests on step
6, I'm fairly confident about the coverage of this conversion patch.
If there is a breakage, it's likely to be something in one of the arch
headers which should be easily discoverable easily on most builds of
the specific arch.
Signed-off-by: Tejun Heo <tj@kernel.org>
Guess-its-ok-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
2010-03-24 16:04:11 +08:00
|
|
|
#include <linux/slab.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/if_ether.h>
|
|
|
|
#include <linux/in.h>
|
|
|
|
#include <linux/errno.h>
|
|
|
|
#include <linux/delay.h>
|
|
|
|
#include <linux/init.h>
|
|
|
|
#include <linux/netdevice.h>
|
|
|
|
#include <linux/etherdevice.h>
|
|
|
|
#include <linux/inetdevice.h>
|
|
|
|
#include <linux/skbuff.h>
|
|
|
|
#include <linux/if_plip.h>
|
|
|
|
#include <linux/workqueue.h>
|
|
|
|
#include <linux/spinlock.h>
|
2007-12-14 08:03:00 +08:00
|
|
|
#include <linux/completion.h>
|
2005-04-17 06:20:36 +08:00
|
|
|
#include <linux/parport.h>
|
|
|
|
#include <linux/bitops.h>
|
|
|
|
|
|
|
|
#include <net/neighbour.h>
|
|
|
|
|
|
|
|
#include <asm/system.h>
|
|
|
|
#include <asm/irq.h>
|
|
|
|
#include <asm/byteorder.h>
|
|
|
|
|
|
|
|
/* Maximum number of devices to support. */
|
|
|
|
#define PLIP_MAX 8
|
|
|
|
|
|
|
|
/* Use 0 for production, 1 for verification, >2 for debug */
|
|
|
|
#ifndef NET_DEBUG
|
|
|
|
#define NET_DEBUG 1
|
|
|
|
#endif
|
2006-03-04 10:33:57 +08:00
|
|
|
static const unsigned int net_debug = NET_DEBUG;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
#define ENABLE(irq) if (irq != -1) enable_irq(irq)
|
|
|
|
#define DISABLE(irq) if (irq != -1) disable_irq(irq)
|
|
|
|
|
|
|
|
/* In micro second */
|
|
|
|
#define PLIP_DELAY_UNIT 1
|
|
|
|
|
|
|
|
/* Connection time out = PLIP_TRIGGER_WAIT * PLIP_DELAY_UNIT usec */
|
|
|
|
#define PLIP_TRIGGER_WAIT 500
|
|
|
|
|
|
|
|
/* Nibble time out = PLIP_NIBBLE_WAIT * PLIP_DELAY_UNIT usec */
|
|
|
|
#define PLIP_NIBBLE_WAIT 3000
|
|
|
|
|
|
|
|
/* Bottom halves */
|
2006-11-22 22:57:56 +08:00
|
|
|
static void plip_kick_bh(struct work_struct *work);
|
|
|
|
static void plip_bh(struct work_struct *work);
|
|
|
|
static void plip_timer_bh(struct work_struct *work);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Interrupt handler */
|
2007-10-19 14:54:26 +08:00
|
|
|
static void plip_interrupt(void *dev_id);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Functions for DEV methods */
|
|
|
|
static int plip_tx_packet(struct sk_buff *skb, struct net_device *dev);
|
|
|
|
static int plip_hard_header(struct sk_buff *skb, struct net_device *dev,
|
2007-10-09 16:40:57 +08:00
|
|
|
unsigned short type, const void *daddr,
|
|
|
|
const void *saddr, unsigned len);
|
|
|
|
static int plip_hard_header_cache(const struct neighbour *neigh,
|
2011-07-13 14:28:12 +08:00
|
|
|
struct hh_cache *hh, __be16 type);
|
2005-04-17 06:20:36 +08:00
|
|
|
static int plip_open(struct net_device *dev);
|
|
|
|
static int plip_close(struct net_device *dev);
|
|
|
|
static int plip_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
|
|
|
|
static int plip_preempt(void *handle);
|
|
|
|
static void plip_wakeup(void *handle);
|
2005-07-28 02:46:09 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Overall state of the PLIP link (one per device); indexes
 * connection_state_table to pick the bottom-half handler. */
enum plip_connection_state {
	PLIP_CN_NONE=0,		/* idle, no transfer in progress */
	PLIP_CN_RECEIVE,	/* currently receiving a packet */
	PLIP_CN_SEND,		/* currently sending a packet */
	PLIP_CN_CLOSING,	/* transfer finished, closing the connection */
	PLIP_CN_ERROR		/* error state entered after repeated timeouts */
};
|
|
|
|
|
|
|
|
/* Progress through a single packet transfer; the same sequence is used
 * for both the send and the receive direction (see struct plip_local). */
enum plip_packet_state {
	PLIP_PK_DONE=0,		/* no packet in flight */
	PLIP_PK_TRIGGER,	/* handshaking with the other end */
	PLIP_PK_LENGTH_LSB,	/* transferring the low byte of the length */
	PLIP_PK_LENGTH_MSB,	/* transferring the high byte of the length */
	PLIP_PK_DATA,		/* transferring the payload octets */
	PLIP_PK_CHECKSUM	/* transferring the trailing checksum octet */
};
|
|
|
|
|
|
|
|
/* Which half (nibble) of the current octet is being transferred;
 * each octet goes over the 5-bit cable as two 4-bit halves. */
enum plip_nibble_state {
	PLIP_NB_BEGIN,	/* ready to start a new octet (low nibble next) */
	PLIP_NB_1,	/* low nibble done, high nibble pending */
	PLIP_NB_2,	/* both nibbles written, waiting for the final ACK */
};
|
|
|
|
|
|
|
|
/* Per-direction transfer state: one instance for sending and one for
 * receiving hangs off every struct net_local. */
struct plip_local {
	enum plip_packet_state state;	/* where we are within the packet */
	enum plip_nibble_state nibble;	/* where we are within the octet */
	union {
		struct {
#if defined(__LITTLE_ENDIAN)
			unsigned char lsb;
			unsigned char msb;
#elif defined(__BIG_ENDIAN)
			unsigned char msb;
			unsigned char lsb;
#else
#error	"Please fix the endianness defines in <asm/byteorder.h>"
#endif
		} b;
		unsigned short h;
	} length;			/* packet length: byte-wise (.b) or whole (.h) */
	unsigned short byte;		/* index of the payload octet in flight */
	unsigned char checksum;		/* running 8-bit sum over the payload */
	unsigned char data;		/* last octet received (checksum compare) */
	struct sk_buff *skb;		/* buffer being filled (rx) or drained (tx) */
};
|
|
|
|
|
|
|
|
/* Device-private state, reached via netdev_priv(dev). */
struct net_local {
	struct net_device *dev;			/* back-pointer to our net_device */
	struct work_struct immediate;		/* runs plip_bh right away */
	struct delayed_work deferred;		/* delayed retry kick (plip_kick_bh) */
	struct delayed_work timer;		/* poll loop for IRQ-less ports */
	struct plip_local snd_data;		/* outgoing transfer state */
	struct plip_local rcv_data;		/* incoming transfer state */
	struct pardevice *pardev;		/* our parport device registration */
	unsigned long trigger;			/* handshake timeout (PLIP_TRIGGER_WAIT) */
	unsigned long nibble;			/* per-nibble timeout (PLIP_NIBBLE_WAIT) */
	enum plip_connection_state connection;	/* overall link state */
	unsigned short timeout_count;		/* consecutive timeouts seen so far */
	int is_deferred;			/* deferred work pending? */
	int port_owner;				/* do we currently own the parport? */
	int should_relinquish;			/* preempted: release port when idle */
	spinlock_t lock;			/* guards connection and transfer state */
	atomic_t kill_timer;			/* set to ask the poll loop to stop */
	struct completion killed_timer_cmp;	/* poll loop signals it has stopped */
};
|
2005-07-28 02:46:09 +08:00
|
|
|
|
|
|
|
static inline void enable_parport_interrupts (struct net_device *dev)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
if (dev->irq != -1)
|
|
|
|
{
|
|
|
|
struct parport *port =
|
2008-11-13 15:38:14 +08:00
|
|
|
((struct net_local *)netdev_priv(dev))->pardev->port;
|
2005-04-17 06:20:36 +08:00
|
|
|
port->ops->enable_irq (port);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-07-28 02:46:09 +08:00
|
|
|
static inline void disable_parport_interrupts (struct net_device *dev)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
if (dev->irq != -1)
|
|
|
|
{
|
|
|
|
struct parport *port =
|
2008-11-13 15:38:14 +08:00
|
|
|
((struct net_local *)netdev_priv(dev))->pardev->port;
|
2005-04-17 06:20:36 +08:00
|
|
|
port->ops->disable_irq (port);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2005-07-28 02:46:09 +08:00
|
|
|
static inline void write_data (struct net_device *dev, unsigned char data)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct parport *port =
|
2008-11-13 15:38:14 +08:00
|
|
|
((struct net_local *)netdev_priv(dev))->pardev->port;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
port->ops->write_data (port, data);
|
|
|
|
}
|
|
|
|
|
2005-07-28 02:46:09 +08:00
|
|
|
static inline unsigned char read_status (struct net_device *dev)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
struct parport *port =
|
2008-11-13 15:38:14 +08:00
|
|
|
((struct net_local *)netdev_priv(dev))->pardev->port;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
return port->ops->read_status (port);
|
|
|
|
}
|
2005-07-28 02:46:09 +08:00
|
|
|
|
2007-10-09 16:40:57 +08:00
|
|
|
/* Link-layer header construction and caching (ethernet-style framing). */
static const struct header_ops plip_header_ops = {
	.create  = plip_hard_header,
	.cache   = plip_hard_header_cache,
};
|
|
|
|
|
2009-01-08 09:26:48 +08:00
|
|
|
/* Standard net_device callbacks for a PLIP interface; address/MTU
 * handling is delegated to the generic ethernet helpers. */
static const struct net_device_ops plip_netdev_ops = {
	.ndo_open		 = plip_open,
	.ndo_stop		 = plip_close,
	.ndo_start_xmit		 = plip_tx_packet,
	.ndo_do_ioctl		 = plip_ioctl,
	.ndo_change_mtu		 = eth_change_mtu,
	.ndo_set_mac_address	 = eth_mac_addr,
	.ndo_validate_addr	 = eth_validate_addr,
};
|
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Entry point of PLIP driver.
|
|
|
|
Probe the hardware, and register/initialize the driver.
|
|
|
|
|
|
|
|
PLIP is rather weird, because of the way it interacts with the parport
|
|
|
|
system. It is _not_ initialised from Space.c. Instead, plip_init()
|
|
|
|
is called, and that function makes up a "struct net_device" for each port, and
|
|
|
|
then calls us here.
|
|
|
|
|
|
|
|
*/
|
|
|
|
/* Finish setting up one PLIP net_device: override the ethernet defaults,
 * install our callbacks and initialise the private per-device state. */
static void
plip_init_netdev(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);

	/* Then, override parts of it */
	dev->tx_queue_len = 10;
	dev->flags = IFF_POINTOPOINT|IFF_NOARP;
	memset(dev->dev_addr, 0xfc, ETH_ALEN);

	dev->netdev_ops = &plip_netdev_ops;
	dev->header_ops = &plip_header_ops;

	/* We do not own the parport until plip_open() claims it. */
	nl->port_owner = 0;

	/* Initialize constants */
	nl->trigger = PLIP_TRIGGER_WAIT;
	nl->nibble = PLIP_NIBBLE_WAIT;

	/* Initialize task queue structures */
	INIT_WORK(&nl->immediate, plip_bh);
	INIT_DELAYED_WORK(&nl->deferred, plip_kick_bh);

	/* No IRQ: fall back to the polling bottom half. */
	if (dev->irq == -1)
		INIT_DELAYED_WORK(&nl->timer, plip_timer_bh);

	spin_lock_init(&nl->lock);
}
|
2005-07-28 02:46:09 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Bottom half handler for the delayed request.
|
|
|
|
This routine is kicked by do_timer().
|
|
|
|
Request `plip_bh' to be invoked. */
|
|
|
|
static void
|
2006-11-22 22:57:56 +08:00
|
|
|
plip_kick_bh(struct work_struct *work)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-11-22 22:57:56 +08:00
|
|
|
struct net_local *nl =
|
|
|
|
container_of(work, struct net_local, deferred.work);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
if (nl->is_deferred)
|
|
|
|
schedule_work(&nl->immediate);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Forward declarations of internal routines */
|
|
|
|
static int plip_none(struct net_device *, struct net_local *,
|
|
|
|
struct plip_local *, struct plip_local *);
|
|
|
|
static int plip_receive_packet(struct net_device *, struct net_local *,
|
|
|
|
struct plip_local *, struct plip_local *);
|
|
|
|
static int plip_send_packet(struct net_device *, struct net_local *,
|
|
|
|
struct plip_local *, struct plip_local *);
|
|
|
|
static int plip_connection_close(struct net_device *, struct net_local *,
|
|
|
|
struct plip_local *, struct plip_local *);
|
|
|
|
static int plip_error(struct net_device *, struct net_local *,
|
|
|
|
struct plip_local *, struct plip_local *);
|
|
|
|
static int plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
|
|
|
|
struct plip_local *snd,
|
|
|
|
struct plip_local *rcv,
|
|
|
|
int error);
|
|
|
|
|
|
|
|
#define OK 0
|
|
|
|
#define TIMEOUT 1
|
|
|
|
#define ERROR 2
|
|
|
|
#define HS_TIMEOUT 3
|
|
|
|
|
|
|
|
typedef int (*plip_func)(struct net_device *dev, struct net_local *nl,
|
|
|
|
struct plip_local *snd, struct plip_local *rcv);
|
|
|
|
|
2006-03-04 10:33:57 +08:00
|
|
|
/* Bottom-half dispatch table, indexed by enum plip_connection_state. */
static const plip_func connection_state_table[] =
{
	plip_none,		/* PLIP_CN_NONE */
	plip_receive_packet,	/* PLIP_CN_RECEIVE */
	plip_send_packet,	/* PLIP_CN_SEND */
	plip_connection_close,	/* PLIP_CN_CLOSING */
	plip_error		/* PLIP_CN_ERROR */
};
|
|
|
|
|
|
|
|
/* Bottom half handler of PLIP. */
|
|
|
|
static void
|
2006-11-22 22:57:56 +08:00
|
|
|
plip_bh(struct work_struct *work)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-11-22 22:57:56 +08:00
|
|
|
struct net_local *nl = container_of(work, struct net_local, immediate);
|
2005-04-17 06:20:36 +08:00
|
|
|
struct plip_local *snd = &nl->snd_data;
|
|
|
|
struct plip_local *rcv = &nl->rcv_data;
|
|
|
|
plip_func f;
|
|
|
|
int r;
|
|
|
|
|
|
|
|
nl->is_deferred = 0;
|
|
|
|
f = connection_state_table[nl->connection];
|
2009-12-03 15:58:21 +08:00
|
|
|
if ((r = (*f)(nl->dev, nl, snd, rcv)) != OK &&
|
|
|
|
(r = plip_bh_timeout_error(nl->dev, nl, snd, rcv, r)) != OK) {
|
2005-04-17 06:20:36 +08:00
|
|
|
nl->is_deferred = 1;
|
|
|
|
schedule_delayed_work(&nl->deferred, 1);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
2006-11-22 22:57:56 +08:00
|
|
|
plip_timer_bh(struct work_struct *work)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2006-11-22 22:57:56 +08:00
|
|
|
struct net_local *nl =
|
|
|
|
container_of(work, struct net_local, timer.work);
|
2006-09-14 01:24:59 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!(atomic_read (&nl->kill_timer))) {
|
2007-10-19 14:54:26 +08:00
|
|
|
plip_interrupt (nl->dev);
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
schedule_delayed_work(&nl->timer, 1);
|
|
|
|
}
|
|
|
|
else {
|
2007-12-14 08:03:00 +08:00
|
|
|
complete(&nl->killed_timer_cmp);
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Handle a TIMEOUT/ERROR/HS_TIMEOUT result from a state handler.
 * Returns TIMEOUT to request a deferred retry, or OK if the error was
 * benign (e.g. the send was pre-empted by an incoming packet).
 * Early timeouts are retried a few times; persistent ones abort the
 * transfer, drop both skbs and put the link into PLIP_CN_ERROR. */
static int
plip_bh_timeout_error(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv,
		      int error)
{
	unsigned char c0;
	/*
	 * This is tricky. If we got here from the beginning of send (either
	 * with ERROR or HS_TIMEOUT) we have IRQ enabled. Otherwise it's
	 * already disabled. With the old variant of {enable,disable}_irq()
	 * extra disable_irq() was a no-op. Now it became mortal - it's
	 * unbalanced and thus we'll never re-enable IRQ (until rmmod plip,
	 * that is). So we have to treat HS_TIMEOUT and ERROR from send
	 * in a special way.
	 */

	spin_lock_irq(&nl->lock);
	if (nl->connection == PLIP_CN_SEND) {

		if (error != ERROR) { /* Timeout */
			nl->timeout_count++;
			/* Handshake timeouts get more retries than mid-packet ones. */
			if ((error == HS_TIMEOUT && nl->timeout_count <= 10) ||
			    nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: transmit timeout(%d,%02x)\n",
			       dev->name, snd->state, c0);
		} else
			error = HS_TIMEOUT;
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	} else if (nl->connection == PLIP_CN_RECEIVE) {
		if (rcv->state == PLIP_PK_TRIGGER) {
			/* Transmission was interrupted. */
			spin_unlock_irq(&nl->lock);
			return OK;
		}
		if (error != ERROR) { /* Timeout */
			if (++nl->timeout_count <= 3) {
				spin_unlock_irq(&nl->lock);
				/* Try again later */
				return TIMEOUT;
			}
			c0 = read_status(dev);
			printk(KERN_WARNING "%s: receive timeout(%d,%02x)\n",
			       dev->name, rcv->state, c0);
		}
		dev->stats.rx_dropped++;
	}
	/* Give up on both directions and release the buffers. */
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	spin_unlock_irq(&nl->lock);
	/* Only the send-handshake path still has the IRQ enabled here
	 * (see the comment above), so only then balance it with DISABLE. */
	if (error == HS_TIMEOUT) {
		DISABLE(dev->irq);
		synchronize_irq(dev->irq);
	}
	disable_parport_interrupts (dev);
	netif_stop_queue (dev);
	nl->connection = PLIP_CN_ERROR;
	write_data (dev, 0x00);

	return TIMEOUT;
}
|
2005-07-28 02:46:09 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Handler for PLIP_CN_NONE: the link is idle, nothing to do. */
static int
plip_none(struct net_device *dev, struct net_local *nl,
	  struct plip_local *snd, struct plip_local *rcv)
{
	return OK;
}
|
|
|
|
|
|
|
|
/* PLIP_RECEIVE --- receive a byte(two nibbles)
|
|
|
|
Returns OK on success, TIMEOUT on timeout */
|
2005-07-28 02:46:09 +08:00
|
|
|
/* Receive one octet (two nibbles) from the peer into *data_p.
 * *ns_p records how far we got, so a TIMEOUT return can be resumed
 * later by calling again with the same state.
 * Returns OK on success, TIMEOUT on timeout. */
static inline int
plip_receive(unsigned short nibble_timeout, struct net_device *dev,
	     enum plip_nibble_state *ns_p, unsigned char *data_p)
{
	unsigned char c0, c1;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		/* Wait for the low nibble (strobe bit 0x80 cleared),
		 * double-reading to guard against lines still settling. */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if ((c0 & 0x80) == 0) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p = (c0 >> 3) & 0x0f;
		write_data (dev, 0x10); /* send ACK */
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Wait for the high nibble (strobe bit 0x80 set). */
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			udelay(PLIP_DELAY_UNIT);
			if (c0 & 0x80) {
				c1 = read_status(dev);
				if (c0 == c1)
					break;
			}
			if (--cx == 0)
				return TIMEOUT;
		}
		*data_p |= (c0 << 1) & 0xf0;
		write_data (dev, 0x00); /* send ACK */
		*ns_p = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_NB_2:
		break;
	}
	return OK;
}
|
|
|
|
|
|
|
|
/*
|
2006-09-14 01:24:59 +08:00
|
|
|
* Determine the packet's protocol ID. The rule here is that we
|
2005-04-17 06:20:36 +08:00
|
|
|
* assume 802.3 if the type field is short enough to be a length.
|
|
|
|
* This is normal practice and works for any 'now in use' protocol.
|
|
|
|
*
|
|
|
|
* PLIP is ethernet ish but the daddr might not be valid if unicast.
|
|
|
|
* PLIP fortunately has no bus architecture (its Point-to-point).
|
|
|
|
*
|
|
|
|
* We can't fix the daddr thing as that quirk (more bug) is embedded
|
|
|
|
* in far too many old systems not all even running Linux.
|
|
|
|
*/
|
2006-09-14 01:24:59 +08:00
|
|
|
|
2005-07-13 03:08:43 +08:00
|
|
|
/* Determine the protocol ID of a received frame and set skb->pkt_type,
 * following the standard ethernet type/length heuristics (see the
 * comment block above for why daddr can't be trusted on PLIP). */
static __be16 plip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb,dev->hard_header_len);
	eth = eth_hdr(skb);

	/* Group-address bit set: classify broadcast vs. multicast. */
	if(*eth->h_dest&1)
	{
		if(memcmp(eth->h_dest,dev->broadcast, ETH_ALEN)==0)
			skb->pkt_type=PACKET_BROADCAST;
		else
			skb->pkt_type=PACKET_MULTICAST;
	}

	/*
	 *	This ALLMULTI check should be redundant by 1.4
	 *	so don't forget to remove it.
	 */

	/* Values >= 1536 are ethertypes; smaller ones are 802.3 lengths. */
	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 *	This is a magic hack to spot IPX packets. Older Novell breaks
	 *	the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 *	layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 *	won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *)rawp == 0xFFFF)
		return htons(ETH_P_802_3);

	/*
	 *	Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}
|
|
|
|
|
|
|
|
/* PLIP_RECEIVE_PACKET --- receive a packet */
|
|
|
|
/* Receive a whole packet, resumable at any state after a TIMEOUT.
 * Interrupts are kept off for the duration of the transfer and turned
 * back on before returning to the idle/send state. */
static int
plip_receive_packet(struct net_device *dev, struct net_local *nl,
		    struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;

	switch (rcv->state) {
	case PLIP_PK_TRIGGER:
		DISABLE(dev->irq);
		/* Don't need to synchronize irq, as we can safely ignore it */
		disable_parport_interrupts (dev);
		write_data (dev, 0x01); /* send ACK */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive start\n", dev->name);
		rcv->state = PLIP_PK_LENGTH_LSB;
		rcv->nibble = PLIP_NB_BEGIN;
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (snd->state != PLIP_PK_DONE) {
			/* We also have a packet to send: use the shorter
			 * trigger timeout so a collision aborts quickly. */
			if (plip_receive(nl->trigger, dev,
					 &rcv->nibble, &rcv->length.b.lsb)) {
				/* collision, here dev->tbusy == 1 */
				rcv->state = PLIP_PK_DONE;
				nl->is_deferred = 1;
				nl->connection = PLIP_CN_SEND;
				schedule_delayed_work(&nl->deferred, 1);
				enable_parport_interrupts (dev);
				ENABLE(dev->irq);
				return OK;
			}
		} else {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &rcv->length.b.lsb))
				return TIMEOUT;
		}
		rcv->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->length.b.msb))
			return TIMEOUT;
		/* Sanity-check the announced length before allocating. */
		if (rcv->length.h > dev->mtu + dev->hard_header_len ||
		    rcv->length.h < 8) {
			printk(KERN_WARNING "%s: bogus packet size %d.\n", dev->name, rcv->length.h);
			return ERROR;
		}
		/* Malloc up new buffer. */
		rcv->skb = dev_alloc_skb(rcv->length.h + 2);
		if (rcv->skb == NULL) {
			printk(KERN_ERR "%s: Memory squeeze.\n", dev->name);
			return ERROR;
		}
		skb_reserve(rcv->skb, 2);	/* Align IP on 16 byte boundaries */
		skb_put(rcv->skb,rcv->length.h);
		rcv->skb->dev = dev;
		rcv->state = PLIP_PK_DATA;
		rcv->byte = 0;
		rcv->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		lbuf = rcv->skb->data;
		do {
			if (plip_receive(nibble_timeout, dev,
					 &rcv->nibble, &lbuf[rcv->byte]))
				return TIMEOUT;
		} while (++rcv->byte < rcv->length.h);
		/* Sum the payload backwards; ends with rcv->byte == 0. */
		do {
			rcv->checksum += lbuf[--rcv->byte];
		} while (rcv->byte);
		rcv->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_receive(nibble_timeout, dev,
				 &rcv->nibble, &rcv->data))
			return TIMEOUT;
		if (rcv->data != rcv->checksum) {
			dev->stats.rx_crc_errors++;
			if (net_debug)
				printk(KERN_DEBUG "%s: checksum error\n", dev->name);
			return ERROR;
		}
		rcv->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Inform the upper layer for the arrival of a packet. */
		rcv->skb->protocol=plip_type_trans(rcv->skb, dev);
		netif_rx_ni(rcv->skb);
		dev->stats.rx_bytes += rcv->length.h;
		dev->stats.rx_packets++;
		rcv->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: receive end\n", dev->name);

		/* Close the connection. */
		write_data (dev, 0x00);
		spin_lock_irq(&nl->lock);
		if (snd->state != PLIP_PK_DONE) {
			/* A send was pending: switch straight to sending. */
			nl->connection = PLIP_CN_SEND;
			spin_unlock_irq(&nl->lock);
			schedule_work(&nl->immediate);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		} else {
			nl->connection = PLIP_CN_NONE;
			spin_unlock_irq(&nl->lock);
			enable_parport_interrupts (dev);
			ENABLE(dev->irq);
			return OK;
		}
	}
	return OK;
}
|
|
|
|
|
|
|
|
/* PLIP_SEND --- send a byte (two nibbles)
|
|
|
|
Returns OK on success, TIMEOUT when timeout */
|
2005-07-28 02:46:09 +08:00
|
|
|
/* Send one octet (two nibbles) to the peer. *ns_p records how far we
 * got, so a TIMEOUT return can be resumed by calling again with the
 * same state. Returns OK on success, TIMEOUT when timeout. */
static inline int
plip_send(unsigned short nibble_timeout, struct net_device *dev,
	  enum plip_nibble_state *ns_p, unsigned char data)
{
	unsigned char c0;
	unsigned int cx;

	switch (*ns_p) {
	case PLIP_NB_BEGIN:
		/* Present the low nibble without the strobe bit first. */
		write_data (dev, data & 0x0f);
		*ns_p = PLIP_NB_1;
		/* fall through */

	case PLIP_NB_1:
		/* Raise the strobe bit and wait for the peer's ACK
		 * (status bit 0x80 cleared). */
		write_data (dev, 0x10 | (data & 0x0f));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if ((c0 & 0x80) == 0)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		write_data (dev, 0x10 | (data >> 4));
		*ns_p = PLIP_NB_2;
		/* fall through */

	case PLIP_NB_2:
		/* Drop the strobe bit with the high nibble and wait for
		 * the peer's ACK (status bit 0x80 set). */
		write_data (dev, (data >> 4));
		cx = nibble_timeout;
		while (1) {
			c0 = read_status(dev);
			if (c0 & 0x80)
				break;
			if (--cx == 0)
				return TIMEOUT;
			udelay(PLIP_DELAY_UNIT);
		}
		*ns_p = PLIP_NB_BEGIN;
		return OK;
	}
	return OK;
}
|
|
|
|
|
|
|
|
/* PLIP_SEND_PACKET --- send a packet

   State-machine body of the transmit path: triggers the remote receiver,
   then streams length, payload and checksum via plip_send().  Called with
   interrupts enabled; takes nl->lock only around the connection checks.
   The switch falls through so a resumed call continues mid-packet.

   Returns OK, TIMEOUT, HS_TIMEOUT (handshake failed) or ERROR. */
static int
plip_send_packet(struct net_device *dev, struct net_local *nl,
		 struct plip_local *snd, struct plip_local *rcv)
{
	unsigned short nibble_timeout = nl->nibble;
	unsigned char *lbuf;
	unsigned char c0;
	unsigned int cx;

	/* Defensive check: the skb may have vanished on an earlier error. */
	if (snd->skb == NULL || (lbuf = snd->skb->data) == NULL) {
		printk(KERN_DEBUG "%s: send skb lost\n", dev->name);
		snd->state = PLIP_PK_DONE;
		snd->skb = NULL;
		return ERROR;
	}

	switch (snd->state) {
	case PLIP_PK_TRIGGER:
		/* Peer must be idle (status lines 0x80) before we start. */
		if ((read_status(dev) & 0xf8) != 0x80)
			return HS_TIMEOUT;

		/* Trigger remote rx interrupt. */
		write_data (dev, 0x08);
		cx = nl->trigger;
		while (1) {
			udelay(PLIP_DELAY_UNIT);
			spin_lock_irq(&nl->lock);
			if (nl->connection == PLIP_CN_RECEIVE) {
				spin_unlock_irq(&nl->lock);
				/* Interrupted: peer started sending first;
				   back off and let the receive path win. */
				dev->stats.collisions++;
				return OK;
			}
			c0 = read_status(dev);
			if (c0 & 0x08) {
				/* Peer acknowledged the trigger. */
				spin_unlock_irq(&nl->lock);
				DISABLE(dev->irq);
				synchronize_irq(dev->irq);
				/* Re-check after the IRQ is quiesced: an
				   interrupt may have raced in meanwhile. */
				if (nl->connection == PLIP_CN_RECEIVE) {
					/* Interrupted.
					   We don't need to enable irq,
					   as it is soon disabled. */
					/* Yes, we do. New variant of
					   {enable,disable}_irq *counts*
					   them.  -- AV  */
					ENABLE(dev->irq);
					dev->stats.collisions++;
					return OK;
				}
				disable_parport_interrupts (dev);
				if (net_debug > 2)
					printk(KERN_DEBUG "%s: send start\n", dev->name);
				snd->state = PLIP_PK_LENGTH_LSB;
				snd->nibble = PLIP_NB_BEGIN;
				nl->timeout_count = 0;
				break;
			}
			spin_unlock_irq(&nl->lock);
			if (--cx == 0) {
				/* Peer never answered; release the lines. */
				write_data (dev, 0x00);
				return HS_TIMEOUT;
			}
		}
		/* fall through */

	case PLIP_PK_LENGTH_LSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.lsb))
			return TIMEOUT;
		snd->state = PLIP_PK_LENGTH_MSB;
		/* fall through */

	case PLIP_PK_LENGTH_MSB:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->length.b.msb))
			return TIMEOUT;
		snd->state = PLIP_PK_DATA;
		snd->byte = 0;
		snd->checksum = 0;
		/* fall through */

	case PLIP_PK_DATA:
		/* Stream the payload bytes, then compute the checksum by
		   walking the buffer backwards (snd->byte ends at 0). */
		do {
			if (plip_send(nibble_timeout, dev,
				      &snd->nibble, lbuf[snd->byte]))
				return TIMEOUT;
		} while (++snd->byte < snd->length.h);
		do {
			snd->checksum += lbuf[--snd->byte];
		} while (snd->byte);
		snd->state = PLIP_PK_CHECKSUM;
		/* fall through */

	case PLIP_PK_CHECKSUM:
		if (plip_send(nibble_timeout, dev,
			      &snd->nibble, snd->checksum))
			return TIMEOUT;

		dev->stats.tx_bytes += snd->skb->len;
		dev_kfree_skb(snd->skb);
		dev->stats.tx_packets++;
		snd->state = PLIP_PK_DONE;
		/* fall through */

	case PLIP_PK_DONE:
		/* Close the connection */
		write_data (dev, 0x00);
		snd->skb = NULL;
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: send end\n", dev->name);
		nl->connection = PLIP_CN_CLOSING;
		nl->is_deferred = 1;
		/* Defer the final close so the peer has time to settle. */
		schedule_delayed_work(&nl->deferred, 1);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		return OK;
	}
	return OK;
}
|
|
|
|
|
|
|
|
/* Finish closing a connection: drop back to the idle state, restart the
   TX queue, and hand the parallel port back if another parport client
   asked for it while we were busy (see plip_preempt()).
   Always returns OK. */
static int
plip_connection_close(struct net_device *dev, struct net_local *nl,
		      struct plip_local *snd, struct plip_local *rcv)
{
	spin_lock_irq(&nl->lock);
	/* Only transition if no new connection started in the meantime. */
	if (nl->connection == PLIP_CN_CLOSING) {
		nl->connection = PLIP_CN_NONE;
		netif_wake_queue (dev);
	}
	spin_unlock_irq(&nl->lock);
	if (nl->should_relinquish) {
		/* A competing parport device wanted the port: release it. */
		nl->should_relinquish = nl->port_owner = 0;
		parport_release(nl->pardev);
	}
	return OK;
}
|
|
|
|
|
|
|
|
/* PLIP_ERROR --- wait till other end settled

   Error-recovery state: if the peer's status lines read idle (0x80) the
   interface is reset and interrupts re-enabled; otherwise recovery is
   retried one tick later via the deferred work.  Always returns OK. */
static int
plip_error(struct net_device *dev, struct net_local *nl,
	   struct plip_local *snd, struct plip_local *rcv)
{
	unsigned char status;

	status = read_status(dev);
	if ((status & 0xf8) == 0x80) {
		/* Peer is idle again: bring the interface back to life. */
		if (net_debug > 2)
			printk(KERN_DEBUG "%s: reset interface.\n", dev->name);
		nl->connection = PLIP_CN_NONE;
		nl->should_relinquish = 0;
		netif_start_queue (dev);
		enable_parport_interrupts (dev);
		ENABLE(dev->irq);
		netif_wake_queue (dev);
	} else {
		/* Still noisy: poll again on the next timer tick. */
		nl->is_deferred = 1;
		schedule_delayed_work(&nl->deferred, 1);
	}

	return OK;
}
|
2005-07-28 02:46:09 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Handle the parallel port interrupts.

   Registered with parport as the interrupt callback (hence the void*
   argument rather than a normal IRQ signature).  A genuine PLIP request
   shows status pattern 0xc0; anything else is treated as spurious.  On a
   valid request we switch to the RECEIVE state and kick the immediate
   work item, which runs the receive state machine. */
static void
plip_interrupt(void *dev_id)
{
	struct net_device *dev = dev_id;
	struct net_local *nl;
	struct plip_local *rcv;
	unsigned char c0;
	unsigned long flags;

	nl = netdev_priv(dev);
	rcv = &nl->rcv_data;

	spin_lock_irqsave (&nl->lock, flags);

	c0 = read_status(dev);
	if ((c0 & 0xf8) != 0xc0) {
		/* Not a PLIP trigger pattern; ignore (only worth logging
		   when we actually have an IRQ line). */
		if ((dev->irq != -1) && (net_debug > 1))
			printk(KERN_DEBUG "%s: spurious interrupt\n", dev->name);
		spin_unlock_irqrestore (&nl->lock, flags);
		return;
	}

	if (net_debug > 3)
		printk(KERN_DEBUG "%s: interrupt.\n", dev->name);

	switch (nl->connection) {
	case PLIP_CN_CLOSING:
		netif_wake_queue (dev);
		/* fall through: a fresh request supersedes the close. */
	case PLIP_CN_NONE:
	case PLIP_CN_SEND:
		/* Peer wants to talk: flip to receive mode (receive wins
		   over a pending send; the sender counts a collision). */
		rcv->state = PLIP_PK_TRIGGER;
		nl->connection = PLIP_CN_RECEIVE;
		nl->timeout_count = 0;
		schedule_work(&nl->immediate);
		break;

	case PLIP_CN_RECEIVE:
		/* May occur because there is race condition
		   around test and set of dev->interrupt.
		   Ignore this interrupt. */
		break;

	case PLIP_CN_ERROR:
		printk(KERN_ERR "%s: receive interrupt in error state\n", dev->name);
		break;
	}

	spin_unlock_irqrestore(&nl->lock, flags);
}
|
2005-07-28 02:46:09 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Network-stack transmit entry point (ndo_start_xmit).

   Claims the parallel port if we do not own it, stops the TX queue (PLIP
   handles a single packet at a time), records the skb in the send state
   and kicks the immediate work item which runs plip_send_packet().
   Returns NETDEV_TX_OK on acceptance, NETDEV_TX_BUSY to make the stack
   requeue the packet. */
static int
plip_tx_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;

	if (netif_queue_stopped(dev))
		return NETDEV_TX_BUSY;

	/* We may need to grab the bus */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev))
			return NETDEV_TX_BUSY;
		nl->port_owner = 1;
	}

	/* One packet in flight at a time. */
	netif_stop_queue (dev);

	if (skb->len > dev->mtu + dev->hard_header_len) {
		printk(KERN_WARNING "%s: packet too big, %d.\n", dev->name, (int)skb->len);
		netif_start_queue (dev);
		return NETDEV_TX_BUSY;
	}

	if (net_debug > 2)
		printk(KERN_DEBUG "%s: send request\n", dev->name);

	spin_lock_irq(&nl->lock);
	snd->skb = skb;
	snd->length.h = skb->len;
	snd->state = PLIP_PK_TRIGGER;
	/* Only start a new connection if the line is idle; otherwise the
	   running state machine will pick the packet up. */
	if (nl->connection == PLIP_CN_NONE) {
		nl->connection = PLIP_CN_SEND;
		nl->timeout_count = 0;
	}
	schedule_work(&nl->immediate);
	spin_unlock_irq(&nl->lock);

	return NETDEV_TX_OK;
}
|
|
|
|
|
|
|
|
static void
|
2007-10-09 16:40:57 +08:00
|
|
|
plip_rewrite_address(const struct net_device *dev, struct ethhdr *eth)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
2010-09-15 12:04:31 +08:00
|
|
|
const struct in_device *in_dev;
|
2005-04-17 06:20:36 +08:00
|
|
|
|
2010-09-15 12:04:31 +08:00
|
|
|
rcu_read_lock();
|
|
|
|
in_dev = __in_dev_get_rcu(dev);
|
2007-10-09 16:40:57 +08:00
|
|
|
if (in_dev) {
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Any address will do - we take the first */
|
2007-10-09 16:40:57 +08:00
|
|
|
const struct in_ifaddr *ifa = in_dev->ifa_list;
|
|
|
|
if (ifa) {
|
2005-04-17 06:20:36 +08:00
|
|
|
memcpy(eth->h_source, dev->dev_addr, 6);
|
|
|
|
memset(eth->h_dest, 0xfc, 2);
|
|
|
|
memcpy(eth->h_dest+2, &ifa->ifa_address, 4);
|
|
|
|
}
|
|
|
|
}
|
2010-09-15 12:04:31 +08:00
|
|
|
rcu_read_unlock();
|
2005-04-17 06:20:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
plip_hard_header(struct sk_buff *skb, struct net_device *dev,
|
2007-10-09 16:40:57 +08:00
|
|
|
unsigned short type, const void *daddr,
|
|
|
|
const void *saddr, unsigned len)
|
2005-04-17 06:20:36 +08:00
|
|
|
{
|
|
|
|
int ret;
|
|
|
|
|
2007-10-09 16:40:57 +08:00
|
|
|
ret = eth_header(skb, dev, type, daddr, saddr, len);
|
|
|
|
if (ret >= 0)
|
2005-04-17 06:20:36 +08:00
|
|
|
plip_rewrite_address (dev, (struct ethhdr *)skb->data);
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
2008-12-26 15:56:45 +08:00
|
|
|
/* Fill the hardware-header cache entry via the generic Ethernet helper,
 * then rewrite the cached addresses into PLIP's format so subsequent
 * fast-path transmits get the right header.
 * Returns 0 on success, or the error from eth_header_cache(). */
static int plip_hard_header_cache(const struct neighbour *neigh,
				  struct hh_cache *hh, __be16 type)
{
	int rc = eth_header_cache(neigh, hh, type);

	if (rc == 0) {
		struct ethhdr *cached =
			(struct ethhdr *)((u8 *)hh->hh_data +
					  HH_DATA_OFF(sizeof(*cached)));

		plip_rewrite_address(neigh->dev, cached);
	}
	return rc;
}
|
2005-04-17 06:20:36 +08:00
|
|
|
|
|
|
|
/* Open/initialize the board. This is called (in the current kernel)
   sometime after booting when the 'ifconfig' program is run.

   This routine gets exclusive access to the parallel port by allocating
   its IRQ line.

   Returns 0 on success, -EAGAIN if the parallel port cannot be claimed. */
static int
plip_open(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct in_device *in_dev;

	/* Grab the port */
	if (!nl->port_owner) {
		if (parport_claim(nl->pardev)) return -EAGAIN;
		nl->port_owner = 1;
	}

	nl->should_relinquish = 0;

	/* Clear the data port. */
	write_data (dev, 0x00);

	/* Enable rx interrupt. */
	enable_parport_interrupts (dev);
	if (dev->irq == -1)
	{
		/* No IRQ line: fall back to the polling timer. */
		atomic_set (&nl->kill_timer, 0);
		schedule_delayed_work(&nl->timer, 1);
	}

	/* Initialize the state machine. */
	nl->rcv_data.state = nl->snd_data.state = PLIP_PK_DONE;
	nl->rcv_data.skb = nl->snd_data.skb = NULL;
	nl->connection = PLIP_CN_NONE;
	nl->is_deferred = 0;

	/* Fill in the MAC-level header.
	   We used to abuse dev->broadcast to store the point-to-point
	   MAC address, but we no longer do it. Instead, we fetch the
	   interface address whenever it is needed, which is cheap enough
	   because we use the hh_cache. Actually, abusing dev->broadcast
	   didn't work, because when using plip_open the point-to-point
	   address isn't yet known.
	   PLIP doesn't have a real MAC address, but we need it to be
	   DOS compatible, and to properly support taps (otherwise,
	   when the device address isn't identical to the address of a
	   received frame, the kernel incorrectly drops it). */

	/* RTNL is held on the open path, so the plain rtnl accessor is
	   safe here (no RCU needed). */
	in_dev=__in_dev_get_rtnl(dev);
	if (in_dev) {
		/* Any address will do - we take the first. We already
		   have the first two bytes filled with 0xfc, from
		   plip_init_dev(). */
		struct in_ifaddr *ifa=in_dev->ifa_list;
		if (ifa != NULL) {
			memcpy(dev->dev_addr+2, &ifa->ifa_local, 4);
		}
	}

	netif_start_queue (dev);

	return 0;
}
|
|
|
|
|
|
|
|
/* The inverse routine to plip_open ().

   Stops the queue, quiesces the IRQ (or the polling timer in IRQ-less
   mode), releases the parallel port and frees any in-flight skbs.
   Always returns 0. */
static int
plip_close(struct net_device *dev)
{
	struct net_local *nl = netdev_priv(dev);
	struct plip_local *snd = &nl->snd_data;
	struct plip_local *rcv = &nl->rcv_data;

	netif_stop_queue (dev);
	DISABLE(dev->irq);
	synchronize_irq(dev->irq);

	if (dev->irq == -1)
	{
		/* Polling mode: signal the timer to stop and wait until
		   it has actually exited before tearing anything down. */
		init_completion(&nl->killed_timer_cmp);
		atomic_set (&nl->kill_timer, 1);
		wait_for_completion(&nl->killed_timer_cmp);
	}

#ifdef NOTDEF
	outb(0x00, PAR_DATA(dev));
#endif
	nl->is_deferred = 0;
	nl->connection = PLIP_CN_NONE;
	if (nl->port_owner) {
		parport_release(nl->pardev);
		nl->port_owner = 0;
	}

	/* Drop any half-sent / half-received packets. */
	snd->state = PLIP_PK_DONE;
	if (snd->skb) {
		dev_kfree_skb(snd->skb);
		snd->skb = NULL;
	}
	rcv->state = PLIP_PK_DONE;
	if (rcv->skb) {
		kfree_skb(rcv->skb);
		rcv->skb = NULL;
	}

#ifdef NOTDEF
	/* Reset. */
	outb(0x00, PAR_CONTROL(dev));
#endif
	return 0;
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
plip_preempt(void *handle)
|
|
|
|
{
|
|
|
|
struct net_device *dev = (struct net_device *)handle;
|
|
|
|
struct net_local *nl = netdev_priv(dev);
|
|
|
|
|
|
|
|
/* Stand our ground if a datagram is on the wire */
|
|
|
|
if (nl->connection != PLIP_CN_NONE) {
|
|
|
|
nl->should_relinquish = 1;
|
|
|
|
return 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
nl->port_owner = 0; /* Remember that we released the bus */
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
plip_wakeup(void *handle)
|
|
|
|
{
|
|
|
|
struct net_device *dev = (struct net_device *)handle;
|
|
|
|
struct net_local *nl = netdev_priv(dev);
|
|
|
|
|
|
|
|
if (nl->port_owner) {
|
|
|
|
/* Why are we being woken up? */
|
|
|
|
printk(KERN_DEBUG "%s: why am I being woken up?\n", dev->name);
|
|
|
|
if (!parport_claim(nl->pardev))
|
|
|
|
/* bus_owner is already set (but why?) */
|
|
|
|
printk(KERN_DEBUG "%s: I'm broken.\n", dev->name);
|
|
|
|
else
|
|
|
|
return;
|
|
|
|
}
|
2006-09-14 01:24:59 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
if (!(dev->flags & IFF_UP))
|
|
|
|
/* Don't need the port when the interface is down */
|
|
|
|
return;
|
|
|
|
|
|
|
|
if (!parport_claim(nl->pardev)) {
|
|
|
|
nl->port_owner = 1;
|
|
|
|
/* Clear the data port. */
|
|
|
|
write_data (dev, 0x00);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static int
|
|
|
|
plip_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
|
|
|
|
{
|
|
|
|
struct net_local *nl = netdev_priv(dev);
|
|
|
|
struct plipconf *pc = (struct plipconf *) &rq->ifr_ifru;
|
|
|
|
|
|
|
|
if (cmd != SIOCDEVPLIP)
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
|
|
|
|
switch(pc->pcmd) {
|
|
|
|
case PLIP_GET_TIMEOUT:
|
|
|
|
pc->trigger = nl->trigger;
|
|
|
|
pc->nibble = nl->nibble;
|
|
|
|
break;
|
|
|
|
case PLIP_SET_TIMEOUT:
|
|
|
|
if(!capable(CAP_NET_ADMIN))
|
|
|
|
return -EPERM;
|
|
|
|
nl->trigger = pc->trigger;
|
|
|
|
nl->nibble = pc->nibble;
|
|
|
|
break;
|
|
|
|
default:
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
2005-07-28 02:46:09 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
/* Module parameters: list of parport numbers to bind to (-1 entries are
   unused slots; parport[0] == -2 disables the driver entirely), and the
   `timid' flag, which makes plip skip ports that already have devices. */
static int parport[PLIP_MAX] = { [0 ... PLIP_MAX-1] = -1 };
static int timid;

module_param_array(parport, int, NULL, 0);
module_param(timid, int, 0);
MODULE_PARM_DESC(parport, "List of parport device numbers to use by plip");

/* One slot per possible plip interface, filled in by plip_attach(). */
static struct net_device *dev_plip[PLIP_MAX] = { NULL, };
|
|
|
|
|
|
|
|
static inline int
|
|
|
|
plip_searchfor(int list[], int a)
|
|
|
|
{
|
|
|
|
int i;
|
|
|
|
for (i = 0; i < PLIP_MAX && list[i] != -1; i++) {
|
|
|
|
if (list[i] == a) return 1;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* plip_attach() is called (by the parport code) when a port is
|
|
|
|
* available to use. */
|
|
|
|
static void plip_attach (struct parport *port)
|
|
|
|
{
|
|
|
|
static int unit;
|
|
|
|
struct net_device *dev;
|
|
|
|
struct net_local *nl;
|
|
|
|
char name[IFNAMSIZ];
|
|
|
|
|
2006-09-14 01:24:59 +08:00
|
|
|
if ((parport[0] == -1 && (!timid || !port->devices)) ||
|
2005-04-17 06:20:36 +08:00
|
|
|
plip_searchfor(parport, port->number)) {
|
|
|
|
if (unit == PLIP_MAX) {
|
|
|
|
printk(KERN_ERR "plip: too many devices\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
sprintf(name, "plip%d", unit);
|
|
|
|
dev = alloc_etherdev(sizeof(struct net_local));
|
2012-01-29 21:47:52 +08:00
|
|
|
if (!dev)
|
2005-04-17 06:20:36 +08:00
|
|
|
return;
|
2006-09-14 01:24:59 +08:00
|
|
|
|
2005-04-17 06:20:36 +08:00
|
|
|
strcpy(dev->name, name);
|
|
|
|
|
|
|
|
dev->irq = port->irq;
|
|
|
|
dev->base_addr = port->base;
|
|
|
|
if (port->irq == -1) {
|
|
|
|
printk(KERN_INFO "plip: %s has no IRQ. Using IRQ-less mode,"
|
|
|
|
"which is fairly inefficient!\n", port->name);
|
|
|
|
}
|
|
|
|
|
|
|
|
nl = netdev_priv(dev);
|
2006-11-22 22:57:56 +08:00
|
|
|
nl->dev = dev;
|
2007-11-23 04:26:01 +08:00
|
|
|
nl->pardev = parport_register_device(port, dev->name, plip_preempt,
|
2006-09-14 01:24:59 +08:00
|
|
|
plip_wakeup, plip_interrupt,
|
2005-04-17 06:20:36 +08:00
|
|
|
0, dev);
|
|
|
|
|
|
|
|
if (!nl->pardev) {
|
|
|
|
printk(KERN_ERR "%s: parport_register failed\n", name);
|
|
|
|
goto err_free_dev;
|
|
|
|
}
|
|
|
|
|
|
|
|
plip_init_netdev(dev);
|
|
|
|
|
|
|
|
if (register_netdev(dev)) {
|
|
|
|
printk(KERN_ERR "%s: network register failed\n", name);
|
|
|
|
goto err_parport_unregister;
|
|
|
|
}
|
|
|
|
|
|
|
|
printk(KERN_INFO "%s", version);
|
|
|
|
if (dev->irq != -1)
|
|
|
|
printk(KERN_INFO "%s: Parallel port at %#3lx, "
|
|
|
|
"using IRQ %d.\n",
|
|
|
|
dev->name, dev->base_addr, dev->irq);
|
|
|
|
else
|
|
|
|
printk(KERN_INFO "%s: Parallel port at %#3lx, "
|
|
|
|
"not using IRQ.\n",
|
|
|
|
dev->name, dev->base_addr);
|
|
|
|
dev_plip[unit++] = dev;
|
|
|
|
}
|
|
|
|
return;
|
|
|
|
|
|
|
|
err_parport_unregister:
|
|
|
|
parport_unregister_device(nl->pardev);
|
|
|
|
err_free_dev:
|
|
|
|
free_netdev(dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* plip_detach() is called (by the parport code) when a port is
 * no longer available to use.
 * Intentionally empty: devices live until module unload, where
 * plip_cleanup_module() tears them down. */
static void plip_detach (struct parport *port)
{
	/* Nothing to do */
}
|
|
|
|
|
|
|
|
/* parport driver hooks: called when ports appear/disappear. */
static struct parport_driver plip_driver = {
	.name	= "plip",
	.attach = plip_attach,
	.detach = plip_detach
};
|
|
|
|
|
|
|
|
/* Module unload: unregister from parport first (so no new attaches can
   race in), then tear down every interface we created — unregister the
   netdev, release the port if we still own it, and free everything. */
static void __exit plip_cleanup_module (void)
{
	struct net_device *dev;
	int i;

	parport_unregister_driver (&plip_driver);

	for (i=0; i < PLIP_MAX; i++) {
		if ((dev = dev_plip[i])) {
			struct net_local *nl = netdev_priv(dev);
			unregister_netdev(dev);
			if (nl->port_owner)
				parport_release(nl->pardev);
			parport_unregister_device(nl->pardev);
			free_netdev(dev);
			dev_plip[i] = NULL;
		}
	}
}
|
|
|
|
|
|
|
|
#ifndef MODULE

/* Next free slot in parport[] while parsing boot parameters. */
static int parport_ptr;

/* Parse the "plip=" kernel boot parameter.  Accepted forms:
     plip=parport<N>  -- bind to parallel port N
     plip=timid       -- only use ports with no other devices
     plip= / plip=0   -- disable the driver
   Anything else is rejected with a warning.  Always returns 1 (the
   option is consumed either way). */
static int __init plip_setup(char *str)
{
	int ints[4];

	str = get_options(str, ARRAY_SIZE(ints), ints);

	/* Ugh. */
	if (!strncmp(str, "parport", 7)) {
		int n = simple_strtoul(str+7, NULL, 10);
		if (parport_ptr < PLIP_MAX)
			parport[parport_ptr++] = n;
		else
			printk(KERN_INFO "plip: too many ports, %s ignored.\n",
			       str);
	} else if (!strcmp(str, "timid")) {
		timid = 1;
	} else {
		if (ints[0] == 0 || ints[1] == 0) {
			/* disable driver on "plip=" or "plip=0" */
			parport[0] = -2;
		} else {
			printk(KERN_WARNING "warning: 'plip=0x%x' ignored\n",
			       ints[1]);
		}
	}
	return 1;
}

__setup("plip=", plip_setup);

#endif /* !MODULE */
|
|
|
|
|
|
|
|
/* Module init: honour the "disabled" sentinel, reconcile conflicting
 * boot options, then register with the parport subsystem.
 * Returns 0 on success (or when disabled), 1 on registration failure. */
static int __init plip_init (void)
{
	/* "plip=0" / "plip=" on the command line disables the driver. */
	if (parport[0] == -2)
		return 0;

	/* An explicit port list makes `timid' meaningless. */
	if (timid && parport[0] != -1) {
		printk(KERN_WARNING "plip: warning, ignoring `timid' since specific ports given.\n");
		timid = 0;
	}

	if (parport_register_driver (&plip_driver) == 0)
		return 0;

	printk (KERN_WARNING "plip: couldn't register driver\n");
	return 1;
}
|
|
|
|
|
|
|
|
/* Module entry/exit points and license declaration. */
module_init(plip_init);
module_exit(plip_cleanup_module);
MODULE_LICENSE("GPL");
|